git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge tag 'drm-next-2020-06-02' of git://anongit.freedesktop.org/drm/drm
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Jun 2020 22:04:15 +0000 (15:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Jun 2020 22:04:15 +0000 (15:04 -0700)
Pull drm updates from Dave Airlie:
 "Highlights:

   - Core DRM had a lot of refactoring around managed drm resources to
     make drivers simpler.

   - Intel Tigerlake support is on by default

   - amdgpu now supports p2p PCI buffer sharing and encrypted GPU memory

  Details:

  core:
   - uapi: error out EBUSY when existing master
   - uapi: rework SET/DROP MASTER permission handling
   - remove drm_pci.h
   - drm_pci* are now legacy
   - introduced managed DRM resources
   - subclassing support for drm_framebuffer
   - simple encoder helper
   - edid improvements
   - vblank + writeback documentation improved
   - drm/mm - optimise tree searches
   - port drivers to use devm_drm_dev_alloc

  dma-buf:
   - add flag for p2p buffer support

  mst:
   - ACT timeout improvements
   - remove drm_dp_mst_has_audio
   - don't use 2nd TX slot - spec recommends against it

  bridge:
   - dw-hdmi various improvements
   - chrontel ch7033 support
   - fix stack issues with old gcc

  hdmi:
   - add unpack function for drm infoframe

  fbdev:
   - misc fbdev driver fixes

  i915:
   - uapi: global sseu pinning
   - uapi: OA buffer polling
   - uapi: remove generated perf code
   - uapi: per-engine default property values in sysfs
   - Tigerlake GEN12 enabled.
   - Lots of gem refactoring
   - Tigerlake enablement patches
   - move to drm_device logging
   - Icelake gamma HW readout
   - push MST link retrain to hotplug work
   - bandwidth atomic helpers
   - ICL fixes
   - RPS/GT refactoring
   - Cherryview full-ppgtt support
   - i915 locking guidelines documented
   - require linear fb stride to be 512 multiple on gen9
   - Tigerlake SAGV support

  amdgpu:
   - uapi: encrypted GPU memory handling
   - uapi: add MEM_SYNC IB flag
   - p2p dma-buf support
   - export VRAM dma-bufs
   - FRU chip access support
   - RAS/SR-IOV updates
   - Powerplay locking fixes
   - VCN DPG (powergating) enablement
   - GFX10 clockgating fixes
   - DC fixes
   - GPU reset fixes
   - navi SDMA fix
   - expose FP16 for modesetting
   - DP 1.4 compliance fixes
   - gfx10 soft recovery
   - Improved Critical Thermal Faults handling
   - resizable BAR on gmc10

  amdkfd:
   - uapi: GWS resource management
   - track GPU memory per process
   - report PCI domain in topology

  radeon:
   - safe reg list generator fixes

  nouveau:
   - HD audio fixes on recent systems
   - vGPU detection (fail probe if we're on one, for now)
   - Interlaced mode fixes (mostly avoidance on Turing, which doesn't support it)
   - SVM improvements/fixes
   - NVIDIA format modifier support
   - Misc other fixes.

  adv7511:
   - HDMI SPDIF support

  ast:
   - allocate crtc state size
   - fix double assignment
   - fix suspend

  bochs:
   - drop connector register

  cirrus:
   - move to tiny drivers.

  exynos:
   - fix imported dma-buf mapping
   - enable runtime PM
   - fixes and cleanups

  mediatek:
   - DPI pin mode swap
   - config mipi_tx current/impedance

  lima:
   - devfreq + cooling device support
   - task handling improvements
   - runtime PM support

  pl111:
   - vexpress init improvements
   - fix module auto-load

  rcar-du:
   - DT bindings conversion to YAML
   - Planes zpos sanity check and fix
   - MAINTAINERS entry for LVDS panel driver

  mcde:
   - fix return value

  mgag200:
   - use managed config init

  stm:
   - read endpoints from DT

  vboxvideo:
   - use PCI managed functions
   - drop WC mtrr

  vkms:
   - enable cursor by default

  rockchip:
   - afbc support

  virtio:
   - various cleanups

  qxl:
   - fix cursor notify port

  hisilicon:
   - 128-byte stride alignment fix

  sun4i:
   - improved format handling"

* tag 'drm-next-2020-06-02' of git://anongit.freedesktop.org/drm/drm: (1401 commits)
  drm/amd/display: Fix potential integer wraparound resulting in a hang
  drm/amd/display: drop cursor position check in atomic test
  drm/amdgpu: fix device attribute node create failed with multi gpu
  drm/nouveau: use correct conflicting framebuffer API
  drm/vblank: Fix -Wformat compile warnings on some arches
  drm/amdgpu: Sync with VM root BO when switching VM to CPU update mode
  drm/amd/display: Handle GPU reset for DC block
  drm/amdgpu: add apu flags (v2)
  drm/amd/powerpay: Disable gfxoff when setting manual mode on picasso and raven
  drm/amdgpu: fix pm sysfs node handling (v2)
  drm/amdgpu: move gpu_info parsing after common early init
  drm/amdgpu: move discovery gfx config fetching
  drm/nouveau/dispnv50: fix runtime pm imbalance on error
  drm/nouveau: fix runtime pm imbalance on error
  drm/nouveau: fix runtime pm imbalance on error
  drm/nouveau/debugfs: fix runtime pm imbalance on error
  drm/nouveau/nouveau/hmm: fix migrate zero page to GPU
  drm/nouveau/nouveau/hmm: fix nouveau_dmem_chunk allocations
  drm/nouveau/kms/nv50-: Share DP SST mode_valid() handling with MST
  drm/nouveau/kms/nv50-: Move 8BPC limit for MST into nv50_mstc_get_modes()
  ...

1191 files changed:
Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/anx6345.yaml
Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/ps8640.yaml
Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt [deleted file]
Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt [deleted file]
Documentation/devicetree/bindings/display/dsi-controller.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt [deleted file]
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt [deleted file]
Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
Documentation/devicetree/bindings/display/panel/display-timings.yaml
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt [deleted file]
Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt [deleted file]
Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt [deleted file]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt [deleted file]
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt [deleted file]
Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt [deleted file]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt [deleted file]
Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,lg4573.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt [deleted file]
Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt [deleted file]
Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt [deleted file]
Documentation/devicetree/bindings/display/panel/panel-common.yaml
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt [deleted file]
Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt [deleted file]
Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt [deleted file]
Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/simple-panel.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt [deleted file]
Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt [deleted file]
Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/tpo,td.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt [deleted file]
Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/renesas,du.txt
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt [deleted file]
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt [deleted file]
Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/gpu/amdgpu.rst
Documentation/gpu/drm-internals.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/i915.rst
Documentation/gpu/todo.rst
MAINTAINERS
arch/arm/mach-sa1100/shannon.c
drivers/char/agp/intel-gtt.c
drivers/dma-buf/Makefile
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-fence-chain.c
drivers/dma-buf/dma-fence.c
drivers/dma-buf/selftests.h
drivers/dma-buf/st-dma-fence-chain.c [new file with mode: 0644]
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c [changed mode: 0644->0755]
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c [changed mode: 0644->0755]
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/nvd.h
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/soc15d.h
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdgpu/vid.h
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/basics/Makefile
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_sink.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h [moved from drivers/gpu/drm/amd/display/dc/basics/log_helpers.c with 75% similarity]
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
drivers/gpu/drm/amd/display/dmub/dmub_srv.h [moved from drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h with 97% similarity]
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/hdcp_types.h
drivers/gpu/drm/amd/display/include/logger_interface.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/display/modules/stats/stats.c [deleted file]
drivers/gpu/drm/amd/display/modules/vmid/vmid.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
drivers/gpu/drm/amd/powerplay/inc/smu_types.h
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/smu_internal.h
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smu_v12_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/aspeed/aspeed_gfx.h
drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/aspeed/aspeed_gfx_out.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/adv7511/Kconfig
drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
drivers/gpu/drm/bridge/chrontel-ch7033.c [new file with mode: 0644]
drivers/gpu/drm/bridge/nwl-dsi.c [new file with mode: 0644]
drivers/gpu/drm/bridge/nwl-dsi.h [new file with mode: 0644]
drivers/gpu/drm/bridge/panel.c
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/bridge/sii9234.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358768.c
drivers/gpu/drm/cirrus/Kconfig [deleted file]
drivers/gpu/drm/cirrus/Makefile [deleted file]
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_blend.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_dma.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_managed.c [new file with mode: 0644]
drivers/gpu/drm/drm_mipi_dbi.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_vram_helper_common.c [deleted file]
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/gma500/cdv_intel_crt.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/mdfld_output.h
drivers/gpu/drm/gma500/mdfld_tmd_vid.c
drivers/gpu/drm/gma500/mdfld_tpo_vid.c
drivers/gpu/drm/gma500/oaktrail_hdmi.c
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
drivers/gpu/drm/i2c/sil164_drv.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/Kconfig.profile
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_atomic_plane.c
drivers/gpu/drm/i915/display/intel_atomic_plane.h
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_bw.h
drivers/gpu/drm/i915/display/intel_color.c
drivers/gpu/drm/i915/display/intel_connector.c
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_ddi.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_debugfs.h
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_display_power.h
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_link_training.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dsb.c
drivers/gpu/drm/i915/display/intel_dsi.c
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_global_state.c
drivers/gpu/drm/i915/display/intel_gmbus.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdcp.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_hotplug.c
drivers/gpu/drm/i915/display/intel_hotplug.h
drivers/gpu/drm/i915/display/intel_lspcon.c
drivers/gpu/drm/i915/display/intel_lvds.c
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/display/intel_panel.c
drivers/gpu/drm/i915/display/intel_panel.h
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_psr.h
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/display/intel_tc.c
drivers/gpu/drm/i915/display/intel_tc.h
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_context.h
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_fence.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c [new file with mode: 0644]
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/debugfs_engines.c
drivers/gpu/drm/i915/gt/debugfs_gt.c
drivers/gpu/drm/i915/gt/debugfs_gt.h
drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context_sseu.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_pm.h
drivers/gpu/drm/i915/gt/intel_engine_pool.h [deleted file]
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c [moved from drivers/gpu/drm/i915/i915_gem_fence_reg.c with 88% similarity]
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h [moved from drivers/gpu/drm/i915/i915_gem_fence_reg.h with 86% similarity]
drivers/gpu/drm/i915/gt/intel_gpu_commands.h
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c [moved from drivers/gpu/drm/i915/gt/intel_engine_pool.c with 53% similarity]
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h [moved from drivers/gpu/drm/i915/gt/intel_engine_pool_types.h with 54% similarity]
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_gtt.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_lrc.h
drivers/gpu/drm/i915/gt/intel_lrc_reg.h
drivers/gpu/drm/i915/gt/intel_rc6.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_ring.h
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_rps.h
drivers/gpu/drm/i915/gt/intel_rps_types.h
drivers/gpu/drm/i915/gt/intel_sseu.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/intel_timeline.h
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/gt/selftest_gt_pm.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_rc6.c
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
drivers/gpu/drm/i915/gt/selftest_rps.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/selftest_rps.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shmem_utils.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shmem_utils.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/st_shmem_utils.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/sysfs_engines.c
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_huc.c
drivers/gpu/drm/i915/gt/uc/intel_huc.h
drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/gt/uc/intel_uc.h
drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_config.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_debugfs.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_getparam.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_perf_types.h
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_priolist_types.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_scheduler_types.h
drivers/gpu/drm/i915/i915_selftest.h
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/i915/i915_sw_fence_work.c
drivers/gpu/drm/i915/i915_sw_fence_work.h
drivers/gpu/drm/i915/i915_switcheroo.c
drivers/gpu/drm/i915/i915_utils.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_dram.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_pm.h
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/intel_wakeref.c
drivers/gpu/drm/i915/intel_wakeref.h
drivers/gpu/drm/i915/intel_wopcm.c
drivers/gpu/drm/i915/oa/i915_oa_bdw.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_bdw.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_bxt.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_bxt.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_chv.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_chv.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cnl.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_cnl.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_glk.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_glk.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_hsw.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_hsw.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_icl.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_icl.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_tgl.c [deleted file]
drivers/gpu/drm/i915/oa/i915_oa_tgl.h [deleted file]
drivers/gpu/drm/i915/selftests/i915_active.c
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_perf.c
drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/igt_spinner.c
drivers/gpu/drm/i915/selftests/intel_memory_region.c
drivers/gpu/drm/i915/selftests/librapl.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/librapl.h [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/ingenic/ingenic-drm.c
drivers/gpu/drm/lima/Kconfig
drivers/gpu/drm/lima/Makefile
drivers/gpu/drm/lima/lima_bcast.c
drivers/gpu/drm/lima/lima_bcast.h
drivers/gpu/drm/lima/lima_ctx.c
drivers/gpu/drm/lima/lima_ctx.h
drivers/gpu/drm/lima/lima_devfreq.c [new file with mode: 0644]
drivers/gpu/drm/lima/lima_devfreq.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_device.c
drivers/gpu/drm/lima/lima_device.h
drivers/gpu/drm/lima/lima_dlbu.c
drivers/gpu/drm/lima/lima_dlbu.h
drivers/gpu/drm/lima/lima_drv.c
drivers/gpu/drm/lima/lima_drv.h
drivers/gpu/drm/lima/lima_dump.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_gp.c
drivers/gpu/drm/lima/lima_gp.h
drivers/gpu/drm/lima/lima_l2_cache.c
drivers/gpu/drm/lima/lima_l2_cache.h
drivers/gpu/drm/lima/lima_mmu.c
drivers/gpu/drm/lima/lima_mmu.h
drivers/gpu/drm/lima/lima_pmu.c
drivers/gpu/drm/lima/lima_pmu.h
drivers/gpu/drm/lima/lima_pp.c
drivers/gpu/drm/lima/lima_pp.h
drivers/gpu/drm/lima/lima_sched.c
drivers/gpu/drm/lima/lima_sched.h
drivers/gpu/drm/lima/lima_trace.c [new file with mode: 0644]
drivers/gpu/drm/lima/lima_trace.h [new file with mode: 0644]
drivers/gpu/drm/lima/lima_vm.h
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/mcde/mcde_drm.h
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_mipi_tx.c
drivers/gpu/drm/mediatek/mtk_mipi_tx.h
drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_drv.h
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_vclk.c
drivers/gpu/drm/meson/meson_vclk.h
drivers/gpu/drm/mgag200/mgag200_cursor.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_i2c.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_debugfs.h
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/nouveau/Kbuild
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv04/overlay.c
drivers/gpu/drm/nouveau/dispnv50/base507c.c
drivers/gpu/drm/nouveau/dispnv50/core.h
drivers/gpu/drm/nouveau/dispnv50/core507d.c
drivers/gpu/drm/nouveau/dispnv50/core827d.c
drivers/gpu/drm/nouveau/dispnv50/core907d.c
drivers/gpu/drm/nouveau/dispnv50/core917d.c
drivers/gpu/drm/nouveau/dispnv50/corec37d.c
drivers/gpu/drm/nouveau/dispnv50/corec57d.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/disp.h
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
drivers/gpu/drm/nouveau/dispnv50/headc57d.c
drivers/gpu/drm/nouveau/dispnv50/pior507d.c
drivers/gpu/drm/nouveau/dispnv50/sor507d.c
drivers/gpu/drm/nouveau/dispnv50/sor907d.c
drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_acpi.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_debugfs.h
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/nouveau/nouveau_dmem.h
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_svm.h
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/core/memory.c
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
drivers/gpu/drm/panel/panel-novatek-nt39016.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-truly-nt35597.c
drivers/gpu/drm/panel/panel-visionox-rm69299.c [new file with mode: 0644]
drivers/gpu/drm/pl111/Makefile
drivers/gpu/drm/pl111/pl111_debugfs.c
drivers/gpu/drm/pl111/pl111_drm.h
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/pl111/pl111_versatile.c
drivers/gpu/drm/pl111/pl111_vexpress.c [deleted file]
drivers/gpu/drm/pl111/pl111_vexpress.h [deleted file]
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_dumb.c
drivers/gpu/drm/qxl/qxl_gem.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/r128/ati_pcigart.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/cdn-dp-reg.c
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rk3066_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/rockchip/rockchip_rgb.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/shmobile/shmob_drm_kms.c
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sti/sti_compositor.h
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_mixer.c
drivers/gpu/drm/sti/sti_mixer.h
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/sti/sti_vid.c
drivers/gpu/drm/sti/sti_vid.h
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_mixer.h
drivers/gpu/drm/sun4i/sun8i_ui_layer.c
drivers/gpu/drm/sun4i/sun8i_vi_layer.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tidss/tidss_crtc.c
drivers/gpu/drm/tidss/tidss_dispc.c
drivers/gpu/drm/tidss/tidss_dispc.h
drivers/gpu/drm/tidss/tidss_drv.c
drivers/gpu/drm/tidss/tidss_drv.h
drivers/gpu/drm/tidss/tidss_irq.c
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/tidss/tidss_kms.h
drivers/gpu/drm/tidss/tidss_plane.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_external.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tiny/Kconfig
drivers/gpu/drm/tiny/Makefile
drivers/gpu/drm/tiny/cirrus.c [moved from drivers/gpu/drm/cirrus/cirrus.c with 91% similarity]
drivers/gpu/drm/tiny/gm12u320.c
drivers/gpu/drm/tiny/hx8357d.c
drivers/gpu/drm/tiny/ili9225.c
drivers/gpu/drm/tiny/ili9341.c
drivers/gpu/drm/tiny/ili9486.c
drivers/gpu/drm/tiny/mi0283qt.c
drivers/gpu/drm/tiny/repaper.c
drivers/gpu/drm/tiny/st7586.c
drivers/gpu/drm/tiny/st7735r.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/v3d/v3d_debugfs.c
drivers/gpu/drm/v3d/v3d_drv.c
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/v3d/v3d_mmu.c
drivers/gpu/drm/v3d/v3d_sched.c
drivers/gpu/drm/vboxvideo/vbox_drv.c
drivers/gpu/drm/vboxvideo/vbox_drv.h
drivers/gpu/drm/vboxvideo/vbox_irq.c
drivers/gpu/drm/vboxvideo/vbox_main.c
drivers/gpu/drm/vboxvideo/vbox_mode.c
drivers/gpu/drm/vboxvideo/vbox_ttm.c
drivers/gpu/drm/vc4/vc4_debugfs.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_debugfs.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_gem.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_drv.h
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/drm/vkms/vkms_output.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/zte/zx_hdmi.c
drivers/gpu/drm/zte/zx_tvenc.c
drivers/gpu/drm/zte/zx_vga.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/arcfb.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/controlfb.c
drivers/video/fbdev/core/fbmon.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/i810/i810_main.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/matrox/g450_pll.c
drivers/video/fbdev/matrox/matroxfb_base.h
drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
drivers/video/fbdev/mx3fb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/dss/dispc.c
drivers/video/fbdev/omap2/omapfb/dss/dss.h
drivers/video/fbdev/omap2/omapfb/dss/venc.c
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pm3fb.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/riva/riva_hw.c
drivers/video/fbdev/s1d13xxxfb.c
drivers/video/fbdev/sa1100fb.c
drivers/video/fbdev/sa1100fb.h
drivers/video/fbdev/savage/savagefb.h
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/uvesafb.c
drivers/video/fbdev/valkyriefb.c
drivers/video/fbdev/vesafb.c
drivers/video/fbdev/via/debug.h
drivers/video/fbdev/via/viafbdev.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/video/fbdev/w100fb.c
drivers/video/hdmi.c
include/drm/drm_client.h
include/drm/drm_connector.h
include/drm/drm_debugfs.h
include/drm/drm_device.h
include/drm/drm_displayid.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_drv.h
include/drm/drm_encoder.h
include/drm/drm_fb_helper.h
include/drm/drm_file.h
include/drm/drm_framebuffer.h
include/drm/drm_gem_framebuffer_helper.h
include/drm/drm_gem_vram_helper.h
include/drm/drm_legacy.h
include/drm/drm_managed.h [new file with mode: 0644]
include/drm/drm_mipi_dbi.h
include/drm/drm_mm.h
include/drm/drm_mode_config.h
include/drm/drm_modes.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_pci.h [deleted file]
include/drm/drm_print.h
include/drm/drm_writeback.h
include/drm/gpu_scheduler.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_debug.h [deleted file]
include/linux/dma-buf.h
include/linux/hdmi.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/i915_drm.h
include/uapi/linux/kfd_ioctl.h
mm/slob.c
mm/slub.c

index 9e90c2b009609b1c3603d3113c8cc3c9dd446efa..e73662c8d339c13dac09ba82b20d4ac3486cf17b 100644 (file)
@@ -119,7 +119,7 @@ examples:
         panel@0 {
                 compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
                 reg = <0>;
-                power-gpios = <&pio 1 7 0>; /* PB07 */
+                power-supply = <&reg_display>;
                 reset-gpios = <&r_pio 0 5 1>; /* PL05 */
                 backlight = <&pwm_bl>;
         };
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7123.txt
deleted file mode 100644 (file)
index d3c2a49..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Analog Devices ADV7123 Video DAC
---------------------------------
-
-The ADV7123 is a digital-to-analog converter that outputs VGA signals from a
-parallel video input.
-
-Required properties:
-
-- compatible: Should be "adi,adv7123"
-
-Optional properties:
-
-- psave-gpios: Power save control GPIO
-
-Required nodes:
-
-The ADV7123 has two video ports. Their connections are modeled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for DPI input
-- Video port 1 for VGA output
-
-
-Example
--------
-
-       adv7123: encoder@0 {
-               compatible = "adi,adv7123";
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-
-                               adv7123_in: endpoint@0 {
-                                       remote-endpoint = <&dpi_out>;
-                               };
-                       };
-
-                       port@1 {
-                               reg = <1>;
-
-                               adv7123_out: endpoint@0 {
-                                       remote-endpoint = <&vga_connector_in>;
-                               };
-                       };
-               };
-       };
index c211038699233b943ad3e10e93215ef30be40434..8c0e4f285fbcb9867dea7e5f4e17743e862903b4 100644 (file)
@@ -37,6 +37,12 @@ properties:
     type: object
 
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -51,6 +57,8 @@ properties:
     required:
       - port@0
 
+    additionalProperties: false
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
new file mode 100644 (file)
index 0000000..9f38f55
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/chrontel,ch7033.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Chrontel CH7033 Video Encoder Device Tree Bindings
+
+maintainers:
+  - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+  compatible:
+    const: chrontel,ch7033
+
+  reg:
+    maxItems: 1
+    description: I2C address of the device
+
+  ports:
+    type: object
+
+    properties:
+      port@0:
+        type: object
+        description: |
+          Video port for RGB input.
+
+      port@1:
+        type: object
+        description: |
+          DVI port, should be connected to a node compatible with the
+          dvi-connector binding.
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - compatible
+  - reg
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        vga-dvi-encoder@76 {
+            compatible = "chrontel,ch7033";
+            reg = <0x76>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    endpoint {
+                        remote-endpoint = <&lcd0_rgb_out>;
+                    };
+                };
+
+                port@1 {
+                    reg = <1>;
+                    endpoint {
+                        remote-endpoint = <&dvi_in>;
+                    };
+                };
+
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
deleted file mode 100644 (file)
index 164cbb1..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Dumb RGB to VGA DAC bridge
----------------------------
-
-This binding is aimed for dumb RGB to VGA DAC based bridges that do not require
-any configuration.
-
-Required properties:
-
-- compatible: Must be "dumb-vga-dac"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Optional properties:
-- vdd-supply: Power supply for DAC
-
-Example
--------
-
-bridge {
-       compatible = "dumb-vga-dac";
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       ports {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               port@0 {
-                       reg = <0>;
-
-                       vga_bridge_in: endpoint {
-                               remote-endpoint = <&tcon0_out_vga>;
-                       };
-               };
-
-               port@1 {
-                       reg = <1>;
-
-                       vga_bridge_out: endpoint {
-                               remote-endpoint = <&vga_con_in>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt b/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
deleted file mode 100644 (file)
index b13adf3..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Synopsys DesignWare MIPI DSI host controller
-============================================
-
-This document defines device tree properties for the Synopsys DesignWare MIPI
-DSI host controller. It doesn't constitue a device tree binding specification
-by itself but is meant to be referenced by platform-specific device tree
-bindings.
-
-When referenced from platform device tree bindings the properties defined in
-this document are defined as follows. The platform device tree bindings are
-responsible for defining whether each optional property is used or not.
-
-- reg: Memory mapped base address and length of the DesignWare MIPI DSI
-  host controller registers. (mandatory)
-
-- clocks: References to all the clocks specified in the clock-names property
-  as specified in [1]. (mandatory)
-
-- clock-names:
-  - "pclk" is the peripheral clock for either AHB and APB. (mandatory)
-  - "px_clk" is the pixel clock for the DPI/RGB input. (optional)
-
-- resets: References to all the resets specified in the reset-names property
-  as specified in [2]. (optional)
-
-- reset-names: string reset name, must be "apb" if used. (optional)
-
-- panel or bridge node: see [3]. (mandatory)
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/reset/reset.txt
-[3] Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
new file mode 100644 (file)
index 0000000..2c50016
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ite,it6505.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ITE it6505 Device Tree Bindings
+
+maintainers:
+  - Allen Chen <allen.chen@ite.com.tw>
+
+description: |
+  The IT6505 is a high-performance DisplayPort 1.1a transmitter,
+  fully compliant with DisplayPort 1.1a, HDCP 1.3 specifications.
+  The IT6505 supports color depth of up to 36 bits (12 bits/color)
+  and ensures robust transmission of high-quality uncompressed video
+  content, along with uncompressed and compressed digital audio content.
+
+  Aside from the various video output formats supported, the IT6505
+  also encodes and transmits up to 8 channels of I2S digital audio,
+  with sampling rate up to 192kHz and sample size up to 24 bits.
+  In addition, an S/PDIF input port takes in compressed audio of up to
+  192kHz frame rate.
+
+  Each IT6505 chip comes preprogrammed with a unique HDCP key,
+  in compliance with the HDCP 1.3 standard so as to provide secure
+  transmission of high-definition content. Users of the IT6505 need not
+  purchase any HDCP keys or ROMs.
+
+properties:
+  compatible:
+    const: ite,it6505
+
+  ovdd-supply:
+    maxItems: 1
+    description: I/O voltage
+
+  pwr18-supply:
+    maxItems: 1
+    description: core voltage
+
+  interrupts:
+    maxItems: 1
+    description: interrupt specifier of INT pin
+
+  reset-gpios:
+    maxItems: 1
+    description: gpio specifier of RESET pin
+
+  extcon:
+    maxItems: 1
+    description: extcon specifier for the Power Delivery
+
+  port:
+    type: object
+    description: A port node pointing to DPI host port node
+
+required:
+  - compatible
+  - ovdd-supply
+  - pwr18-supply
+  - interrupts
+  - reset-gpios
+  - extcon
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        dp-bridge@5c {
+            compatible = "ite,it6505";
+            interrupts = <152 IRQ_TYPE_EDGE_FALLING 152 0>;
+            reg = <0x5c>;
+            pinctrl-names = "default";
+            pinctrl-0 = <&it6505_pins>;
+            ovdd-supply = <&mt6358_vsim1_reg>;
+            pwr18-supply = <&it6505_pp18_reg>;
+            reset-gpios = <&pio 179 1>;
+            extcon = <&usbc_extcon>;
+
+            port {
+                it6505_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
index 8f373029f5d217a07b2c0b37e988ea4cef559b92..800c63764e715f8711fca413aff75db7bbe1abae 100644 (file)
@@ -50,6 +50,12 @@ properties:
       This device has two video ports. Their connections are modeled using the
       OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -66,6 +72,8 @@ properties:
       - port@0
       - port@1
 
+    additionalProperties: false
+
   powerdown-gpios:
     description:
       The GPIO used to control the power down line of this device.
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
new file mode 100644 (file)
index 0000000..8aff2d6
--- /dev/null
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/nwl-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Northwest Logic MIPI-DSI controller on i.MX SoCs
+
+maintainers:
+  - Guido Günther <agx@sigxcpu.org>
+  - Robert Chiras <robert.chiras@nxp.com>
+
+description: |
+  NWL MIPI-DSI host controller found on i.MX8 platforms. This is a DSI bridge
+  for the SoC's NWL MIPI-DSI host controller.
+
+properties:
+  compatible:
+    const: fsl,imx8mq-nwl-dsi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 0
+
+  clocks:
+    items:
+      - description: DSI core clock
+      - description: RX_ESC clock (used in escape mode)
+      - description: TX_ESC clock (used in escape mode)
+      - description: PHY_REF clock
+      - description: LCDIF clock
+
+  clock-names:
+    items:
+      - const: core
+      - const: rx_esc
+      - const: tx_esc
+      - const: phy_ref
+      - const: lcdif
+
+  mux-controls:
+    description:
+      mux controller node to use for operating the input mux
+
+  phys:
+    maxItems: 1
+    description:
+      A phandle to the phy module representing the DPHY
+
+  phy-names:
+    items:
+      - const: dphy
+
+  power-domains:
+    maxItems: 1
+
+  resets:
+    items:
+      - description: dsi byte reset line
+      - description: dsi dpi reset line
+      - description: dsi esc reset line
+      - description: dsi pclk reset line
+
+  reset-names:
+    items:
+      - const: byte
+      - const: dpi
+      - const: esc
+      - const: pclk
+
+  ports:
+    type: object
+    description:
+      A node containing DSI input & output port nodes with endpoint
+      definitions as documented in
+      Documentation/devicetree/bindings/graph.txt.
+    properties:
+      port@0:
+        type: object
+        description:
+          Input port node to receive pixel data from the
+          display controller. Exactly one endpoint must be
+          specified.
+        properties:
+          '#address-cells':
+            const: 1
+
+          '#size-cells':
+            const: 0
+
+          endpoint@0:
+            description: sub-node describing the input from LCDIF
+            type: object
+
+          endpoint@1:
+            description: sub-node describing the input from DCSS
+            type: object
+
+          reg:
+            const: 0
+
+        required:
+          - '#address-cells'
+          - '#size-cells'
+          - reg
+
+        oneOf:
+          - required:
+              - endpoint@0
+          - required:
+              - endpoint@1
+
+        additionalProperties: false
+
+      port@1:
+        type: object
+        description:
+          DSI output port node to the panel or the next bridge
+          in the chain
+
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+    required:
+      - '#address-cells'
+      - '#size-cells'
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+patternProperties:
+  "^panel@[0-9]+$":
+    type: object
+
+required:
+  - '#address-cells'
+  - '#size-cells'
+  - clock-names
+  - clocks
+  - compatible
+  - interrupts
+  - mux-controls
+  - phy-names
+  - phys
+  - ports
+  - reg
+  - reset-names
+  - resets
+
+additionalProperties: false
+
+examples:
+ - |
+
+   #include <dt-bindings/clock/imx8mq-clock.h>
+   #include <dt-bindings/interrupt-controller/arm-gic.h>
+   #include <dt-bindings/reset/imx8mq-reset.h>
+
+   mipi_dsi: mipi_dsi@30a00000 {
+              #address-cells = <1>;
+              #size-cells = <0>;
+              compatible = "fsl,imx8mq-nwl-dsi";
+              reg = <0x30A00000 0x300>;
+              clocks = <&clk IMX8MQ_CLK_DSI_CORE>,
+                       <&clk IMX8MQ_CLK_DSI_AHB>,
+                       <&clk IMX8MQ_CLK_DSI_IPG_DIV>,
+                       <&clk IMX8MQ_CLK_DSI_PHY_REF>,
+                       <&clk IMX8MQ_CLK_LCDIF_PIXEL>;
+              clock-names = "core", "rx_esc", "tx_esc", "phy_ref", "lcdif";
+              interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+              mux-controls = <&mux 0>;
+              power-domains = <&pgc_mipi>;
+              resets = <&src IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N>,
+                       <&src IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N>;
+              reset-names = "byte", "dpi", "esc", "pclk";
+              phys = <&dphy>;
+              phy-names = "dphy";
+
+              panel@0 {
+                      #address-cells = <1>;
+                      #size-cells = <0>;
+                      compatible = "rocktech,jh057n00900";
+                      reg = <0>;
+                      port@0 {
+                           reg = <0>;
+                           panel_in: endpoint {
+                                     remote-endpoint = <&mipi_dsi_out>;
+                           };
+                      };
+              };
+
+              ports {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+
+                    port@0 {
+                           #size-cells = <0>;
+                           #address-cells = <1>;
+                           reg = <0>;
+                           mipi_dsi_in: endpoint@0 {
+                                        reg = <0>;
+                                        remote-endpoint = <&lcdif_mipi_dsi>;
+                           };
+                    };
+                    port@1 {
+                           reg = <1>;
+                           mipi_dsi_out: endpoint {
+                                         remote-endpoint = <&panel_in>;
+                           };
+                    };
+              };
+      };
index 5dff93641bea0d8ce384ce253fbbed78027f7a20..7e27cfcf770dd802c80805293b9c8aba93d674d8 100644 (file)
@@ -50,6 +50,12 @@ properties:
       Documentation/devicetree/bindings/media/video-interfaces.txt
       Documentation/devicetree/bindings/graph.txt
     properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
       port@0:
         type: object
         description: |
@@ -63,6 +69,8 @@ properties:
     required:
       - port@0
 
+    additionalProperties: false
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
new file mode 100644 (file)
index 0000000..0880cbf
--- /dev/null
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/simple-bridge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Transparent non-programmable DRM bridges
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  This binding supports transparent non-programmable bridges that don't require
+  any configuration, with a single input and a single output.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - ti,ths8134a
+          - ti,ths8134b
+        - const: ti,ths8134
+      - enum:
+        - adi,adv7123
+        - dumb-vga-dac
+        - ti,opa362
+        - ti,ths8134
+        - ti,ths8135
+
+  ports:
+    type: object
+    description: |
+      This device has two video ports. Their connections are modeled using the
+      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+    properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+      port@0:
+        type: object
+        description: The bridge input
+
+      port@1:
+        type: object
+        description: The bridge output
+
+    required:
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+  enable-gpios:
+    maxItems: 1
+    description: GPIO controlling bridge enable
+
+  vdd-supply:
+    maxItems: 1
+    description: Power supply for the bridge
+
+required:
+  - compatible
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    bridge {
+        compatible = "ti,ths8134a", "ti,ths8134";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                vga_bridge_in: endpoint {
+                    remote-endpoint = <&tcon0_out_vga>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+
+                vga_bridge_out: endpoint {
+                    remote-endpoint = <&vga_con_in>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
new file mode 100644 (file)
index 0000000..012aa8e
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/snps,dw-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare MIPI DSI host controller
+
+maintainers:
+  - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+  This document defines device tree properties for the Synopsys DesignWare MIPI
+  DSI host controller. It doesn't constitute a device tree binding specification
+  by itself but is meant to be referenced by platform-specific device tree
+  bindings.
+
+  When referenced from platform device tree bindings the properties defined in
+  this document are defined as follows. The platform device tree bindings are
+  responsible for defining whether each property is required or optional.
+
+allOf:
+  - $ref: ../dsi-controller.yaml#
+
+properties:
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Module clock
+      - description: DSI bus clock for either AHB or APB
+      - description: Pixel clock for the DPI/RGB input
+    minItems: 2
+
+  clock-names:
+    items:
+      - const: ref
+      - const: pclk
+      - const: px_clk
+    minItems: 2
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: apb
+
+  ports:
+    type: object
+
+    properties:
+      port@0:
+        type: object
+        description: Input node to receive pixel data.
+      port@1:
+        type: object
+        description: DSI output node to panel.
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - clock-names
+  - clocks
+  - ports
+  - reg
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
deleted file mode 100644 (file)
index d17d1e5..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-Thine Electronics THC63LVD1024 LVDS decoder
--------------------------------------------
-
-The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS streams
-to parallel data outputs. The chip supports single/dual input/output modes,
-handling up to two LVDS input streams and up to two digital CMOS/TTL outputs.
-
-Single or dual operation mode, output data mapping and DDR output modes are
-configured through input signals and the chip does not expose any control bus.
-
-Required properties:
-- compatible: Shall be "thine,thc63lvd1024"
-- vcc-supply: Power supply for TTL output, TTL CLOCKOUT signal, LVDS input,
-  PPL and digital circuitry
-
-Optional properties:
-- powerdown-gpios: Power down GPIO signal, pin name "/PDWN". Active low
-- oe-gpios: Output enable GPIO signal, pin name "OE". Active high
-
-The THC63LVD1024 video port connections are modeled according
-to OF graph bindings specified by Documentation/devicetree/bindings/graph.txt
-
-Required video port nodes:
-- port@0: First LVDS input port
-- port@2: First digital CMOS/TTL parallel output
-
-Optional video port nodes:
-- port@1: Second LVDS input port
-- port@3: Second digital CMOS/TTL parallel output
-
-The device can operate in single-link mode or dual-link mode. In single-link
-mode, all pixels are received on port@0, and port@1 shall not contain any
-endpoint. In dual-link mode, even-numbered pixels are received on port@0 and
-odd-numbered pixels on port@1, and both port@0 and port@1 shall contain
-endpoints.
-
-Example:
---------
-
-       thc63lvd1024: lvds-decoder {
-               compatible = "thine,thc63lvd1024";
-
-               vcc-supply = <&reg_lvds_vcc>;
-               powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-
-                               lvds_dec_in_0: endpoint {
-                                       remote-endpoint = <&lvds_out>;
-                               };
-                       };
-
-                       port@2{
-                               reg = <2>;
-
-                               lvds_dec_out_2: endpoint {
-                                       remote-endpoint = <&adv7511_in>;
-                               };
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
new file mode 100644 (file)
index 0000000..469ac4a
--- /dev/null
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/thine,thc63lvd1024.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Thine Electronics THC63LVD1024 LVDS Decoder
+
+maintainers:
+  - Jacopo Mondi <jacopo+renesas@jmondi.org>
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description: |
+  The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS
+  streams to parallel data outputs. The chip supports single/dual input/output
+  modes, handling up to two LVDS input streams and up to two digital CMOS/TTL
+  outputs.
+
+  Single or dual operation mode, output data mapping and DDR output modes are
+  configured through input signals and the chip does not expose any control
+  bus.
+
+properties:
+  compatible:
+    const: thine,thc63lvd1024
+
+  ports:
+    type: object
+    description: |
+      This device has four video ports. Their connections are modeled using the
+      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+      The device can operate in single-link mode or dual-link mode. In
+      single-link mode, all pixels are received on port@0, and port@1 shall not
+      contain any endpoint. In dual-link mode, even-numbered pixels are
+      received on port@0 and odd-numbered pixels on port@1, and both port@0 and
+      port@1 shall contain endpoints.
+
+    properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+      port@0:
+        type: object
+        description: First LVDS input port
+
+      port@1:
+        type: object
+        description: Second LVDS input port
+
+      port@2:
+        type: object
+        description: First digital CMOS/TTL parallel output
+
+      port@3:
+        type: object
+        description: Second digital CMOS/TTL parallel output
+
+    required:
+      - port@0
+      - port@2
+
+    additionalProperties: false
+
+  oe-gpios:
+    maxItems: 1
+    description: Output enable GPIO signal, pin name "OE", active high.
+
+  powerdown-gpios:
+    maxItems: 1
+    description: Power down GPIO signal, pin name "/PDWN", active low.
+
+  vcc-supply:
+    maxItems: 1
+    description:
+      Power supply for the TTL output, TTL CLOCKOUT signal, LVDS input, PLL and
+      digital circuitry.
+
+required:
+  - compatible
+  - ports
+  - vcc-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    lvds-decoder {
+        compatible = "thine,thc63lvd1024";
+
+        vcc-supply = <&reg_lvds_vcc>;
+        powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                lvds_dec_in_0: endpoint {
+                    remote-endpoint = <&lvds_out>;
+                };
+            };
+
+            port@2 {
+                reg = <2>;
+
+                lvds_dec_out_2: endpoint {
+                    remote-endpoint = <&adv7511_in>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths813x.txt
deleted file mode 100644 (file)
index df3d7c1..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-THS8134 and THS8135 Video DAC
------------------------------
-
-This is the binding for Texas Instruments THS8134, THS8134A, THS8134B and
-THS8135 Video DAC bridges.
-
-Required properties:
-
-- compatible: Must be one of
-  "ti,ths8134"
-  "ti,ths8134a," "ti,ths8134"
-  "ti,ths8134b", "ti,ths8134"
-  "ti,ths8135"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Example
--------
-
-vga-bridge {
-       compatible = "ti,ths8135";
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       ports {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               port@0 {
-                       reg = <0>;
-
-                       vga_bridge_in: endpoint {
-                               remote-endpoint = <&lcdc_out_vga>;
-                       };
-               };
-
-               port@1 {
-                       reg = <1>;
-
-                       vga_bridge_out: endpoint {
-                               remote-endpoint = <&vga_con_in>;
-                       };
-               };
-       };
-};
index fd986c36c7373889a5da1494bb0ffbb922fc5e23..85b71b1fd28a28b4bc32e45ec49df508e3efdcc2 100644 (file)
@@ -28,7 +28,7 @@ description: |
 
 properties:
   $nodename:
-    pattern: "^dsi-controller(@.*)?$"
+    pattern: "^dsi(@.*)?$"
 
   "#address-cells":
     const: 1
@@ -76,7 +76,7 @@ patternProperties:
 examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
-    dsi-controller@a0351000 {
+    dsi@a0351000 {
         reg = <0xa0351000 0x1000>;
         #address-cells = <1>;
         #size-cells = <0>;
index 58914cf681b8926b6958be4dc7a53581727e68fb..77def4456706b463e02e14fa504e179a865688b1 100644 (file)
@@ -17,6 +17,9 @@ Required properties:
   Documentation/devicetree/bindings/graph.txt. This port should be connected
   to the input port of an attached HDMI or LVDS encoder chip.
 
+Optional properties:
+- pinctrl-names: Contain "default" and "sleep".
+
 Example:
 
 dpi0: dpi@1401d000 {
@@ -27,6 +30,9 @@ dpi0: dpi@1401d000 {
                 <&mmsys CLK_MM_DPI_ENGINE>,
                 <&apmixedsys CLK_APMIXED_TVDPLL>;
        clock-names = "pixel", "engine", "pll";
+       pinctrl-names = "default", "sleep";
+       pinctrl-0 = <&dpi_pin_func>;
+       pinctrl-1 = <&dpi_pin_idle>;
 
        port {
                dpi0_out: endpoint {
index a19a6cc375ed7280a016a31c35dae8aa3734d244..8e4729de8c85d983ddf9e4b25a1ff80e2badeeac 100644 (file)
@@ -33,6 +33,13 @@ Required properties:
 - #clock-cells: must be <0>;
 - #phy-cells: must be <0>.
 
+Optional properties:
+- drive-strength-microamp: adjust the driving current; valid values range from
+  3000 to 6000 in steps of 200.
+- nvmem-cells: A phandle to the calibration data provided by a nvmem device. If
+               unspecified default values shall be used.
+- nvmem-cell-names: Should be "calibration-data"
+
 Example:
 
 mipi_tx0: mipi-dphy@10215000 {
@@ -42,6 +49,9 @@ mipi_tx0: mipi-dphy@10215000 {
        clock-output-names = "mipi_tx0_pll";
        #clock-cells = <0>;
        #phy-cells = <0>;
+       drive-strength-microamp = <4600>;
+       nvmem-cells= <&mipi_tx_calibration>;
+       nvmem-cell-names = "calibration-data";
 };
 
 dsi0: dsi@1401b000 {
diff --git a/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt b/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt
deleted file mode 100644 (file)
index 0601a9e..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-ARM Versatile TFT Panels
-
-These panels are connected to the daughterboards found on the
-ARM Versatile reference designs.
-
-This device node must appear as a child to a "syscon"-compatible
-node.
-
-Required properties:
-- compatible: should be "arm,versatile-tft-panel"
-
-Required subnodes:
-- port: see display/panel/panel-common.yaml, graph.txt
-
-
-Example:
-
-sysreg@0 {
-       compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
-       reg = <0x00000 0x1000>;
-
-       panel: display@0 {
-               compatible = "arm,versatile-tft-panel";
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&foo>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml b/Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
new file mode 100644 (file)
index 0000000..41fd571
--- /dev/null
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/arm,versatile-tft-panel.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile TFT Panels
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  These panels are connected to the daughterboards found on the
+  ARM Versatile reference designs.
+
+  This device node must appear as a child to a "syscon"-compatible
+  node.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: arm,versatile-tft-panel
+
+  port: true
+
+required:
+  - compatible
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    sysreg {
+        compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
+        reg = <0x00000 0x1000>;
+
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel {
+            compatible = "arm,versatile-tft-panel";
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&foo>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml b/Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
new file mode 100644 (file)
index 0000000..083d2b9
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/asus,z00t-tm5p5-nt35596.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASUS Z00T TM5P5 NT35596 5.5" 1080×1920 LCD Panel
+
+maintainers:
+  - Konrad Dybcio <konradybcio@gmail.com>
+
+description: |+
+  This panel seems to only be found in the Asus Z00T
+  smartphone and we have no straightforward way of
+  actually getting the correct model number,
+  as no schematics are released publicly.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: asus,z00t-tm5p5-n35596
+  reg: true
+  reset-gpios: true
+  vdd-supply:
+     description: core voltage supply
+  vddio-supply:
+     description: vddio supply
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+  - vddio-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            panel@0 {
+                    reg = <0>;
+
+                    compatible = "asus,z00t-tm5p5-n35596";
+
+                    vdd-supply = <&pm8916_l8>;
+                    vddio-supply = <&pm8916_l6>;
+                    reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+            };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
deleted file mode 100644 (file)
index 3caea21..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Boe Himax8279d 1200x1920 TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,himax8279d8p" and one of: "boe,himax8279d10p"
-- reg: DSI virtual channel of the peripheral
-- enable-gpios: panel enable gpio
-- pp33-gpios: a GPIO phandle for the 3.3v pin that provides the supply voltage
-- pp18-gpios: a GPIO phandle for the 1.8v pin that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel {
-                       compatible = "boe,himax8279d8p", "boe,himax8279d10p";
-                       reg = <0>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
-                       pp33-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
-                       pp18-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml b/Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
new file mode 100644 (file)
index 0000000..272a3a0
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,himax8279d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Boe Himax8279d 1200x1920 TFT LCD panel
+
+maintainers:
+  - Jerry Han <jerry.han.hq@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: boe,himax8279d8p
+      - const: boe,himax8279d10p
+
+  backlight: true
+  enable-gpios: true
+  reg: true
+
+  pp33-gpios:
+    maxItems: 1
+    description: GPIO for the 3.3v pin that provides the supply voltage
+
+  pp18-gpios:
+    maxItems: 1
+    description: GPIO for the 1.8v pin that provides the supply voltage
+
+required:
+  - compatible
+  - reg
+  - enable-gpios
+  - pp33-gpios
+  - pp18-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "boe,himax8279d8p", "boe,himax8279d10p";
+            reg = <0>;
+            backlight = <&backlight>;
+            enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+            pp33-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
+            pp18-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
index 74021345913428f3d67ef4f2c1598799cba796c7..7f5df585101731a4edd7869849ea4125802ba437 100644 (file)
@@ -24,6 +24,8 @@ properties:
       - boe,tv101wum-n53
         # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel
       - auo,b101uan08.3
+        # BOE TV105WUM-NW0 10.5" WUXGA TFT LCD panel
+      - boe,tv105wum-nw0
 
   reg:
     description: the virtual channel number of a DSI peripheral
index c8c0c9cb0492bffe714252277dd02c8dc9caef06..56903ded005e5c6206a96c7ea37cb8bd8664e390 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/display/panel/display-timings.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: display timing bindings
+title: display timings bindings
 
 maintainers:
   - Thierry Reding <thierry.reding@gmail.com>
@@ -14,7 +14,7 @@ maintainers:
 description: |
   A display panel may be able to handle several display timings,
   with different resolutions.
-  The display-timings node makes it possible to specify the timing
+  The display-timings node makes it possible to specify the timings
   and to specify the timing that is native for the display.
 
 properties:
@@ -25,8 +25,8 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: |
       The default display timing is the one specified as native-mode.
-      If no native-mode is specified then the first node is assumed the
-      native mode.
+      If no native-mode is specified then the first node is assumed
+      to be the native mode.
 
 patternProperties:
   "^timing":
diff --git a/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
deleted file mode 100644 (file)
index 82caa7b..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel
-
-Required properties:
-- compatible: must be "feiyang,fy07024di26a30d"
-- reg: DSI virtual channel used by that screen
-- avdd-supply: analog regulator dc1 switch
-- dvdd-supply: 3v3 digital regulator
-- reset-gpios: a GPIO phandle for the reset pin
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-panel@0 {
-       compatible = "feiyang,fy07024di26a30d";
-       reg = <0>;
-       avdd-supply = <&reg_dc1sw>;
-       dvdd-supply = <&reg_dldo2>;
-       reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
-       backlight = <&backlight>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
new file mode 100644 (file)
index 0000000..95acf9e
--- /dev/null
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/feiyang,fy07024di26a30d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel
+
+maintainers:
+  - Jagan Teki <jagan@amarulasolutions.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: feiyang,fy07024di26a30d
+
+  reg:
+    description: DSI virtual channel used by that screen
+    maxItems: 1
+
+  avdd-supply:
+    description: analog regulator dc1 switch
+
+  dvdd-supply:
+    description: 3v3 digital regulator
+
+  reset-gpios: true
+
+  backlight: true
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - dvdd-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "feiyang,fy07024di26a30d";
+            reg = <0>;
+            avdd-supply = <&reg_dc1sw>;
+            dvdd-supply = <&reg_dldo2>;
+            reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+            backlight = <&backlight>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt
deleted file mode 100644 (file)
index 3d5ce6a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-Ilitek ILI9322 TFT panel driver with SPI control bus
-
-This is a driver for 320x240 TFT panels, accepting a variety of input
-streams that get adapted and scaled to the panel. The panel output has
-960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
-VCOMH outputs.
-
-Required properties:
-  - compatible: "dlink,dir-685-panel", "ilitek,ili9322"
-    (full system-specific compatible is always required to look up configuration)
-  - reg: address of the panel on the SPI bus
-
-Optional properties:
-  - vcc-supply: core voltage supply, see regulator/regulator.txt
-  - iovcc-supply: voltage supply for the interface input/output signals,
-    see regulator/regulator.txt
-  - vci-supply: voltage supply for analog parts, see regulator/regulator.txt
-  - reset-gpios: a GPIO spec for the reset pin, see gpio/gpio.txt
-
-  The following optional properties only apply to RGB and YUV input modes and
-  can be omitted for BT.656 input modes:
-
-  - pixelclk-active: see display/panel/display-timing.txt
-  - de-active: see display/panel/display-timing.txt
-  - hsync-active: see display/panel/display-timing.txt
-  - vsync-active: see display/panel/display-timing.txt
-
-The panel must obey the rules for a SPI slave device as specified in
-spi/spi-bus.txt
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in
-media/video-interfaces.txt. This node should describe panel's video bus.
-
-Example:
-
-panel: display@0 {
-       compatible = "dlink,dir-685-panel", "ilitek,ili9322";
-       reg = <0>;
-       vcc-supply = <&vdisp>;
-       iovcc-supply = <&vdisp>;
-       vci-supply = <&vdisp>;
-
-       port {
-               panel_in: endpoint {
-                       remote-endpoint = <&display_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
new file mode 100644 (file)
index 0000000..177d48c
--- /dev/null
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ilitek,ili9322.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9322 TFT panel driver with SPI control bus
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This is a driver for 320x240 TFT panels, accepting a variety of input
+  streams that get adapted and scaled to the panel. The panel output has
+  960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
+  VCOMH outputs.
+
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - dlink,dir-685-panel
+
+      - const: ilitek,ili9322
+
+  reset-gpios: true
+  port: true
+
+  vcc-supply:
+    description: Core voltage supply
+
+  iovcc-supply:
+    description: Voltage supply for the interface input/output signals
+
+  vci-supply:
+    description: Voltage supply for analog parts
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: display@0 {
+            compatible = "dlink,dir-685-panel", "ilitek,ili9322";
+            reg = <0>;
+            vcc-supply = <&vdisp>;
+            iovcc-supply = <&vdisp>;
+            vci-supply = <&vdisp>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&display_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
deleted file mode 100644 (file)
index 4a041ac..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Ilitek ILI9881c based MIPI-DSI panels
-
-Required properties:
-  - compatible: must be "ilitek,ili9881c" and one of:
-    * "bananapi,lhr050h41"
-  - reg: DSI virtual channel used by that screen
-  - power-supply: phandle to the power regulator
-  - reset-gpios: a GPIO phandle for the reset pin
-
-Optional properties:
-  - backlight: phandle to the backlight used
-
-Example:
-panel@0 {
-       compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
-       reg = <0>;
-       power-supply = <&reg_display>;
-       reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
-       backlight = <&pwm_bl>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
new file mode 100644 (file)
index 0000000..a393322
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ilitek,ili9881c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9881c based MIPI-DSI panels
+
+maintainers:
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - bananapi,lhr050h41
+
+      - const: ilitek,ili9881c
+
+  backlight: true
+  power-supply: true
+  reg: true
+  reset-gpios: true
+
+required:
+  - compatible
+  - power-supply
+  - reg
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+            reg = <0>;
+            power-supply = <&reg_display>;
+            reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+            backlight = <&pwm_bl>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
deleted file mode 100644 (file)
index d1cab3a..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,p097pfg"
-- reg: DSI virtual channel of the peripheral
-- avdd-supply: phandle of the regulator that provides positive voltage
-- avee-supply: phandle of the regulator that provides negative voltage
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel@0 {
-                       compatible = "innolux,p079zca";
-                       reg = <0>;
-                       avdd-supply = <...>;
-                       avee-supply = <...>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
new file mode 100644 (file)
index 0000000..5a5f071
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/innolux,p097pfg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
+
+maintainers:
+  - Lin Huang <hl@rock-chips.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: innolux,p097pfg
+
+  backlight: true
+  enable-gpios: true
+  reg: true
+
+  avdd-supply:
+    description: The regulator that provides positive voltage
+
+  avee-supply:
+    description: The regulator that provides negative voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - avee-supply
+  - enable-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "innolux,p097pfg";
+            reg = <0>;
+            avdd-supply = <&avdd>;
+            avee-supply = <&avee>;
+            backlight = <&backlight>;
+            enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt
deleted file mode 100644 (file)
index 513f034..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "innolux,p120zdg-bf1"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- no-hpd: If HPD isn't hooked up; add this property.
-
-Example:
-       panel_edp: panel-edp {
-               compatible = "innolux,p120zdg-bf1";
-               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
-               power-supply = <&pm8916_l2>;
-               backlight = <&backlight>;
-               no-hpd;
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.yaml
new file mode 100644 (file)
index 0000000..243dac2
--- /dev/null
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/innolux,p120zdg-bf1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
+
+maintainers:
+  - Sandeep Panda <spanda@codeaurora.org>
+  - Douglas Anderson <dianders@chromium.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: innolux,p120zdg-bf1
+
+  enable-gpios: true
+  power-supply: true
+  backlight: true
+  no-hpd: true
+
+required:
+  - compatible
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    panel_edp: panel-edp {
+        compatible = "innolux,p120zdg-bf1";
+        enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+        power-supply = <&pm8916_l2>;
+        backlight = <&backlight>;
+        no-hpd;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt
deleted file mode 100644 (file)
index 4989c91..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-JDI model LT070ME05000 1200x1920 7" DSI Panel
-
-Required properties:
-- compatible: should be "jdi,lt070me05000"
-- vddp-supply: phandle of the regulator that provides the supply voltage
-  Power IC supply (3-5V)
-- iovcc-supply: phandle of the regulator that provides the supply voltage
-  IOVCC , power supply for LCM (1.8V)
-- enable-gpios: phandle of gpio for enable line
-  LED_EN, LED backlight enable, High active
-- reset-gpios: phandle of gpio for reset line
-  This should be 8mA, gpio can be configured using mux, pinctrl, pinctrl-names
-  XRES, Reset, Low active
-- dcdc-en-gpios: phandle of the gpio for power ic line
-  Power IC supply enable, High active
-
-Example:
-
-       dsi0: qcom,mdss_dsi@4700000 {
-               panel@0 {
-                       compatible = "jdi,lt070me05000";
-                       reg = <0>;
-
-                       vddp-supply = <&pm8921_l17>;
-                       iovcc-supply = <&pm8921_lvs7>;
-
-                       enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
-                       reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
-                       dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
new file mode 100644 (file)
index 0000000..b8b9435
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/jdi,lt070me05000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: JDI model LT070ME05000 1200x1920 7" DSI Panel
+
+maintainers:
+  - Vinay Simha BN <simhavcs@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: jdi,lt070me05000
+
+  enable-gpios: true
+  reg: true
+  reset-gpios: true
+
+  vddp-supply:
+    description: |
+      The regulator that provides the supply voltage Power IC supply (3-5V)
+
+  iovcc-supply:
+    description: |
+      The regulator that provides the supply voltage IOVCC,
+      power supply for LCM (1.8V)
+
+  dcdc-en-gpios:
+    description: |
+      phandle of the gpio for power ic line
+      Power IC supply enable, High active
+
+required:
+  - compatible
+  - reg
+  - vddp-supply
+  - iovcc-supply
+  - enable-gpios
+  - reset-gpios
+  - dcdc-en-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "jdi,lt070me05000";
+            reg = <0>;
+
+            vddp-supply = <&pm8921_l17>;
+            iovcc-supply = <&pm8921_lvs7>;
+
+            enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+            reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
+            dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.txt
deleted file mode 100644 (file)
index fa95960..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-King Display KD035G6-54NT 3.5" (320x240 pixels) 24-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "kingdisplay,kd035g6-54nt"
-- power-supply: See panel-common.txt
-- reset-gpios: See panel-common.txt
-
-Optional properties:
-- backlight: see panel-common.txt
-
-The generic bindings for the SPI slaves documented in [1] also apply.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/graph.txt
-
-Example:
-
-&spi {
-       panel@0 {
-               compatible = "kingdisplay,kd035g6-54nt";
-               reg = <0>;
-
-               spi-max-frequency = <3125000>;
-               spi-3wire;
-               spi-cs-high;
-
-               reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
-
-               backlight = <&backlight>;
-               power-supply = <&ldo6>;
-
-               port {
-                       panel_input: endpoint {
-                               remote-endpoint = <&panel_output>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
new file mode 100644 (file)
index 0000000..6960036
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/kingdisplay,kd035g6-54nt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: King Display KD035G6-54NT 3.5" (320x240 pixels) 24-bit TFT LCD panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: kingdisplay,kd035g6-54nt
+
+  backlight: true
+  port: true
+  power-supply: true
+  reg: true
+  reset-gpios: true
+
+required:
+  - compatible
+  - power-supply
+  - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "kingdisplay,kd035g6-54nt";
+            reg = <0>;
+
+            spi-max-frequency = <3125000>;
+            spi-3wire;
+            spi-cs-high;
+
+            reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
+
+            backlight = <&backlight>;
+            power-supply = <&ldo6>;
+
+            port {
+                panel_input: endpoint {
+                    remote-endpoint = <&panel_output>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
deleted file mode 100644 (file)
index cfefff6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
-
-Required properties:
-- compatible: should be "kingdisplay,kd097d04"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-       &mipi_dsi {
-               panel@0 {
-                       compatible = "kingdisplay,kd097d04";
-                       reg = <0>;
-                       power-supply = <...>;
-                       backlight = <&backlight>;
-                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
new file mode 100644 (file)
index 0000000..a372bdc
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/leadtek,ltk050h3146w.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Leadtek LTK050H3146W 5.0in 720x1280 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+      - leadtek,ltk050h3146w
+      - leadtek,ltk050h3146w-a2
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+     description: regulator that supplies the iovcc voltage
+  vci-supply:
+     description: regulator that supplies the vci voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vci-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "leadtek,ltk050h3146w";
+            reg = <0>;
+            backlight = <&backlight>;
+            iovcc-supply = <&vcc_1v8>;
+            vci-supply = <&vcc3v3_lcd>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt b/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt
deleted file mode 100644 (file)
index fc1e1b3..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
-
-Required properties:
-- compatible: must be "lg,acx467akm-7"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt b/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.txt
deleted file mode 100644 (file)
index 5e649cb..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG Corporation 7" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,ld070wx3-sl01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/display/panel/lg,lg4573.txt
deleted file mode 100644 (file)
index 824441f..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-LG LG4573 TFT Liquid Crystal Display with SPI control bus
-
-Required properties:
-  - compatible: "lg,lg4573"
-  - reg: address of the panel on the SPI bus
-
-The panel must obey rules for SPI slave device specified in document [1].
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-
-       lcd_panel: display@0 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "lg,lg4573";
-               spi-max-frequency = <10000000>;
-               reg = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
new file mode 100644 (file)
index 0000000..b4314ce
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lg,lg4573.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Heiko Schocher <hs@denx.de>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: lg,lg4573
+
+  reg: true
+  spi-max-frequency: true
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        lcd_panel: display@0 {
+            compatible = "lg,lg4573";
+            spi-max-frequency = <10000000>;
+            reg = <0>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt b/Documentation/devicetree/bindings/display/panel/lg,lh500wx1-sd03.txt
deleted file mode 100644 (file)
index a04fd2b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-LG Corporation 5" HD TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lh500wx1-sd03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.txt
deleted file mode 100644 (file)
index 1a1e653..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-LG.Philips LB035Q02 Panel
-=========================
-
-Required properties:
-- compatible: "lgphilips,lb035q02"
-- enable-gpios: panel enable gpio
-
-Optional properties:
-- label: a symbolic name for the panel
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: panel@0 {
-       compatible = "lgphilips,lb035q02";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-
-       enable-gpios = <&gpio7 7 0>;
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
new file mode 100644 (file)
index 0000000..830e335
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lgphilips,lb035q02.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LG.Philips LB035Q02 Panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: lgphilips,lb035q02
+
+  label: true
+  enable-gpios: true
+  port: true
+
+required:
+  - compatible
+  - enable-gpios
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: panel@0 {
+            compatible = "lgphilips,lb035q02";
+            reg = <0>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            label = "lcd";
+
+            enable-gpios = <&gpio7 7 0>;
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
deleted file mode 100644 (file)
index a89f9c8..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
-
-This device can be used as bridge between a host controller and LCD panels.
-Currently supported LCDs are:
-  - LCD-OLinuXino-4.3TS
-  - LCD-OLinuXino-5
-  - LCD-OLinuXino-7
-  - LCD-OLinuXino-10
-
-The panel itself contains:
-  - AT24C16C EEPROM holding panel identification and timing requirements
-  - AR1021 resistive touch screen controller (optional)
-  - FT5x6 capacitive touch screnn controller (optional)
-  - GT911/GT928 capacitive touch screen controller (optional)
-
-The above chips share same I2C bus. The EEPROM is factory preprogrammed with
-device information (id, serial, etc.) and timing requirements.
-
-Touchscreen bingings can be found in these files:
-  - input/touchscreen/goodix.txt
-  - input/touchscreen/edt-ft5x06.txt
-  - input/touchscreen/ar1021.txt
-
-Required properties:
-  - compatible: should be "olimex,lcd-olinuxino"
-  - reg: address of the configuration EEPROM, should be <0x50>
-  - power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-  - enable-gpios: GPIO pin to enable or disable the panel
-  - backlight: phandle of the backlight device attacked to the panel
-
-Example:
-&i2c2 {
-       panel@50 {
-               compatible = "olimex,lcd-olinuxino";
-               reg = <0x50>;
-               power-supply = <&reg_vcc5v0>;
-               enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
-               backlight = <&backlight>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
new file mode 100644 (file)
index 0000000..2329d96
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/olimex,lcd-olinuxino.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
+
+maintainers:
+  - Stefan Mavrodiev <stefan@olimex.com>
+
+description: |
+  This device can be used as bridge between a host controller and LCD panels.
+  Currently supported LCDs are:
+    - LCD-OLinuXino-4.3TS
+    - LCD-OLinuXino-5
+    - LCD-OLinuXino-7
+    - LCD-OLinuXino-10
+
+  The panel itself contains:
+    - AT24C16C EEPROM holding panel identification and timing requirements
+    - AR1021 resistive touch screen controller (optional)
+    - FT5x6 capacitive touch screen controller (optional)
+    - GT911/GT928 capacitive touch screen controller (optional)
+
+  The above chips share the same I2C bus. The EEPROM is factory preprogrammed with
+  device information (id, serial, etc.) and timing requirements.
+
+  Touchscreen bindings can be found in these files:
+    - input/touchscreen/goodix.yaml
+    - input/touchscreen/edt-ft5x06.txt
+    - input/touchscreen/ar1021.txt
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: olimex,lcd-olinuxino
+
+  backlight: true
+  enable-gpios: true
+  power-supply: true
+  reg: true
+
+required:
+  - compatible
+  - reg
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@50 {
+            compatible = "olimex,lcd-olinuxino";
+            reg = <0x50>;
+            power-supply = <&reg_vcc5v0>;
+            enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
+            backlight = <&backlight>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt
deleted file mode 100644 (file)
index 9d88e96..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
-
-The panel is similar to OSD101T2045-53TS, but it needs additional
-MIPI_DSI_TURN_ON_PERIPHERAL message from the host.
-
-Required properties:
-- compatible: should be "osddisplays,osd101t2587-53ts"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
index ed051ba12084b786fb0e3a767c46e8499f178f00..a747b755ad0609a1af45780ad626f4afccd290b1 100644 (file)
@@ -63,9 +63,9 @@ properties:
 
   display-timings:
     description:
-      Some display panels supports several resolutions with different timing.
+      Some display panels support several resolutions with different timings.
       The display-timings bindings supports specifying several timings and
-      optional specify which is the native mode.
+      optionally specifying which is the native mode.
     allOf:
       - $ref: display-timings.yaml#
 
@@ -96,6 +96,12 @@ properties:
       (hot plug detect) signal, but the signal isn't hooked up so we should
       hardcode the max delay from the panel spec when powering up the panel.
 
+  hpd-gpios:
+    maxItems: 1
+    description:
+      If Hot Plug Detect (HPD) is connected to a GPIO in the system rather
+      than a dedicated HPD pin, the pin can be specified here.
+
   # Control I/Os
 
   # Many display panels can be controlled through pins driven by GPIOs. The nature
@@ -124,6 +130,13 @@ properties:
       while active. Active high reset signals can be supported by inverting the
       GPIO specifier polarity flag.
 
+  te-gpios:
+    maxItems: 1
+    description:
+      GPIO spec for the tearing effect synchronization signal.
+      The tearing effect signal is active high. Active low signals can be
+      supported by inverting the GPIO specifier polarity flag.
+
   # Power
   power-supply:
     description:
index b2e8742fd6af8cc9f099517f4b7abdd54da5c306..16778ce782fc2bde3bef74bfddc741281eeee159 100644 (file)
@@ -29,6 +29,20 @@ properties:
       # compatible must be listed in alphabetical order, ordered by compatible.
       # The description in the comment is mandatory for each compatible.
 
+        # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+      - auo,b080uan01
+        # Boe Corporation 8.0" WUXGA TFT LCD panel
+      - boe,tv080wum-nl0
+        # Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
+      - kingdisplay,kd097d04
+        # LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
+      - lg,acx467akm-7
+        # LG Corporation 7" WXGA TFT LCD panel
+      - lg,ld070wx3-sl01
+        # One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
+      - osddisplays,osd101t2587-53ts
+        # Panasonic 10" WUXGA TFT LCD panel
+      - panasonic,vvx10f004b00
         # Panasonic 10" WUXGA TFT LCD panel
       - panasonic,vvx10f034n00
 
index 393ffc6acbba1cae8a7fd596debf3209ac3b5409..d6cca1479633ab01b015b6be26b897ed87a9f2ab 100644 (file)
@@ -33,8 +33,6 @@ properties:
       - ampire,am-480272h3tmqw-t01h
         # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
       - ampire,am800480r3tmqwa1h
-        # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
-      - auo,b080uan01
         # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
       - auo,b101aw03
         # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
@@ -55,10 +53,16 @@ properties:
       - auo,g101evn010
         # AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
       - auo,g104sn02
+        # AU Optronics Corporation 12.1" (1280x800) TFT LCD panel
+      - auo,g121ean01
         # AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
       - auo,g133han01
+        # AU Optronics Corporation 15.6" (1366x768) TFT LCD panel
+      - auo,g156xtn01
         # AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
       - auo,g185han01
+        # AU Optronics Corporation 19.0" (1280x1024) TFT LCD panel
+      - auo,g190ean01
         # AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
       - auo,p320hvn03
         # AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
@@ -69,10 +73,12 @@ properties:
       - boe,hv070wsa-100
         # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
       - boe,nv101wxmn51
+        # BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
+      - boe,nv133fhm-n61
+        # BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
+      - boe,nv133fhm-n62
         # BOE NV140FHM-N49 14.0" FHD a-Si FT panel
       - boe,nv140fhmn49
-        # Boe Corporation 8.0" WUXGA TFT LCD panel
-      - boe,tv080wum-nl0
         # CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
       - cdtech,s043wq26h-ct7
         # CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
@@ -82,6 +88,8 @@ properties:
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
       - chunghwa,claa101wa01a
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+      - chunghwa,claa101wb01
+        # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
       - chunghwa,claa101wb03
         # DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
       - dataimage,scf0700c48ggu18
@@ -127,6 +135,8 @@ properties:
       - hannstar,hsd100pxn1
         # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
       - hit,tx23d38vm0caa
+        # InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
+      - ivo,m133nwf4-r0
         # Innolux AT043TN24 4.3" WQVGA TFT LCD panel
       - innolux,at043tn24
         # Innolux AT070TN92 7.0" WQVGA TFT LCD panel
@@ -155,6 +165,8 @@ properties:
       - lemaker,bl035-rgb-002
         # LG 7" (800x480 pixels) TFT LCD panel
       - lg,lb070wv8
+        # LG Corporation 5" HD TFT LCD panel
+      - lg,lh500wx1-sd03
         # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
       - lg,lp079qx1-sp0v
         # LG 9.7" (2048x1536 pixels) TFT LCD panel
@@ -227,6 +239,8 @@ properties:
       - sharp,ls020b1dd01d
         # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
       - shelly,sca07010-bfn-lnn
+        # Starry KR070PE2T 7" WVGA TFT LCD panel
+      - starry,kr070pe2t
         # Starry 12.2" (1920x1200 pixels) TFT LCD panel
       - starry,kr122ea0sra
         # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
deleted file mode 100644 (file)
index 1042469..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Raydium RM67171 OLED LCD panel with MIPI-DSI protocol
-
-Required properties:
-- compatible:          "raydium,rm67191"
-- reg:                 virtual channel for MIPI-DSI protocol
-                       must be <0>
-- dsi-lanes:           number of DSI lanes to be used
-                       must be <3> or <4>
-- port:                input port node with endpoint definition as
-                       defined in Documentation/devicetree/bindings/graph.txt;
-                       the input port should be connected to a MIPI-DSI device
-                       driver
-
-Optional properties:
-- reset-gpios:         a GPIO spec for the RST_B GPIO pin
-- v3p3-supply:         phandle to 3.3V regulator that powers the VDD_3V3 pin
-- v1p8-supply:         phandle to 1.8V regulator that powers the VDD_1V8 pin
-- width-mm:            see panel-common.txt
-- height-mm:           see panel-common.txt
-- video-mode:          0 - burst-mode
-                       1 - non-burst with sync event
-                       2 - non-burst with sync pulse
-
-Example:
-
-       panel@0 {
-               compatible = "raydium,rm67191";
-               reg = <0>;
-               pinctrl-0 = <&pinctrl_mipi_dsi_0_1_en>;
-               pinctrl-names = "default";
-               reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
-               dsi-lanes = <4>;
-               width-mm = <68>;
-               height-mm = <121>;
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&mipi_out>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
new file mode 100644 (file)
index 0000000..745dd24
--- /dev/null
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm67191.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium RM67171 OLED LCD panel with MIPI-DSI protocol
+
+maintainers:
+  - Robert Chiras <robert.chiras@nxp.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: raydium,rm67191
+
+  reg: true
+  port: true
+  reset-gpios: true
+  width-mm: true
+  height-mm: true
+
+  dsi-lanes:
+    description: Number of DSI lanes to be used; must be <3> or <4>
+    enum: [3, 4]
+
+  v3p3-supply:
+    description: phandle to 3.3V regulator that powers the VDD_3V3 pin
+
+  v1p8-supply:
+    description: phandle to 1.8V regulator that powers the VDD_1V8 pin
+
+  video-mode:
+    description: |
+      0 - burst-mode
+      1 - non-burst with sync event
+      2 - non-burst with sync pulse
+    enum: [0, 1, 2]
+
+required:
+  - compatible
+  - reg
+  - dsi-lanes
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "raydium,rm67191";
+            reg = <0>;
+            reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+            dsi-lanes = <4>;
+            width-mm = <68>;
+            height-mm = <121>;
+            video-mode = <1>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&mipi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/panel/samsung,amoled-mipi-dsi.yaml
new file mode 100644 (file)
index 0000000..96bdde9
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,amoled-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMOLED MIPI-DSI panels
+
+maintainers:
+  - Hoegeun Kwon <hoegeun.kwon@samsung.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+        # Samsung S6E63J0X03 1.63" 320x320 AMOLED panel
+      - samsung,s6e63j0x03
+        # Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
+      - samsung,s6e3ha2
+        # Samsung S6E3HF2 5.65" 1600x2560 AMOLED panel
+      - samsung,s6e3hf2
+
+  reg: true
+  reset-gpios: true
+  enable-gpios: true
+  te-gpios: true
+
+  vdd3-supply:
+    description: I/O voltage supply
+
+  vci-supply:
+    description: voltage supply for analog circuits
+
+required:
+  - compatible
+  - reg
+  - vdd3-supply
+  - vci-supply
+  - reset-gpios
+  - enable-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "samsung,s6e3ha2";
+            reg = <0>;
+            vdd3-supply = <&ldo27_reg>;
+            vci-supply = <&ldo28_reg>;
+            reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+            enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+            te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.txt
deleted file mode 100644 (file)
index 354d4d1..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-Samsung LD9040 AMOLED LCD parallel RGB panel with SPI control bus
-
-Required properties:
-  - compatible: "samsung,ld9040"
-  - reg: address of the panel on SPI bus
-  - vdd3-supply: core voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin
-  - display-timings: timings for the connected panel according to [1]
-
-The panel must obey rules for SPI slave device specified in document [2].
-
-Optional properties:
-  - power-on-delay: delay after turning regulators on [ms]
-  - reset-delay: delay after reset sequence [ms]
-  - panel-width-mm: physical panel width [mm]
-  - panel-height-mm: physical panel height [mm]
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [3]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/display/panel/display-timing.txt
-[2]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[3]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
-       lcd@0 {
-               compatible = "samsung,ld9040";
-               reg = <0>;
-               vdd3-supply = <&ldo7_reg>;
-               vci-supply = <&ldo17_reg>;
-               reset-gpios = <&gpy4 5 0>;
-               spi-max-frequency = <1200000>;
-               spi-cpol;
-               spi-cpha;
-               power-on-delay = <10>;
-               reset-delay = <10>;
-               panel-width-mm = <90>;
-               panel-height-mm = <154>;
-
-               display-timings {
-                       timing {
-                               clock-frequency = <23492370>;
-                               hactive = <480>;
-                               vactive = <800>;
-                               hback-porch = <16>;
-                               hfront-porch = <16>;
-                               vback-porch = <2>;
-                               vfront-porch = <28>;
-                               hsync-len = <2>;
-                               vsync-len = <1>;
-                               hsync-active = <0>;
-                               vsync-active = <0>;
-                               de-active = <0>;
-                               pixelclk-active = <0>;
-                       };
-               };
-
-               port {
-                       lcd_ep: endpoint {
-                               remote-endpoint = <&fimd_dpi_ep>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
new file mode 100644 (file)
index 0000000..060ee27
--- /dev/null
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,ld9040.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung LD9040 AMOLED LCD parallel RGB panel with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Andrzej Hajda <a.hajda@samsung.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,ld9040
+
+  display-timings: true
+  port: true
+  reg: true
+  reset-gpios: true
+
+  vdd3-supply:
+    description: core voltage supply
+
+  vci-supply:
+    description: voltage supply for analog circuits
+
+  power-on-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: delay after turning regulators on [ms]
+
+  reset-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: delay after reset sequence [ms]
+
+  panel-width-mm:
+    description: physical panel width [mm]
+
+  panel-height-mm:
+    description: physical panel height [mm]
+
+required:
+  - compatible
+  - reg
+  - vdd3-supply
+  - vci-supply
+  - reset-gpios
+  - display-timings
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        lcd@0 {
+            compatible = "samsung,ld9040";
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            reg = <0>;
+            vdd3-supply = <&ldo7_reg>;
+            vci-supply = <&ldo17_reg>;
+            reset-gpios = <&gpy4 5 0>;
+            spi-max-frequency = <1200000>;
+            spi-cpol;
+            spi-cpha;
+            power-on-delay = <10>;
+            reset-delay = <10>;
+            panel-width-mm = <90>;
+            panel-height-mm = <154>;
+
+            display-timings {
+                timing {
+                    clock-frequency = <23492370>;
+                    hactive = <480>;
+                    vactive = <800>;
+                    hback-porch = <16>;
+                    hfront-porch = <16>;
+                    vback-porch = <2>;
+                    vfront-porch = <28>;
+                    hsync-len = <2>;
+                    vsync-len = <1>;
+                    hsync-active = <0>;
+                    vsync-active = <0>;
+                    de-active = <0>;
+                    pixelclk-active = <0>;
+                };
+            };
+
+            port {
+                lcd_ep: endpoint {
+                    remote-endpoint = <&fimd_dpi_ep>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
deleted file mode 100644 (file)
index b94e366..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Samsung S6D16D0 4" 864x480 AMOLED panel
-
-Required properties:
-  - compatible: should be:
-    "samsung,s6d16d0",
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd1-supply: I/O voltage supply
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in
-media/video-interfaces.txt. This node should describe panel's video bus.
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6d16d0";
-               reg = <0>;
-               vdd1-supply = <&foo>;
-               reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
-
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&dsi_out>;
-                       };
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.yaml
new file mode 100644 (file)
index 0000000..66d1474
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6d16d0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S6D16D0 4" 864x480 AMOLED panel
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6d16d0
+
+  port: true
+  reg: true
+  reset-gpios: true
+
+  vdd1-supply:
+    description: I/O voltage supply
+
+required:
+  - compatible
+  - reg
+  - vdd1-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "samsung,s6d16d0";
+            reg = <0>;
+            vdd1-supply = <&foo>;
+            reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
deleted file mode 100644 (file)
index 4acea25..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
-Samsung S6E3HF2 5.65" 1600x2560 AMOLED panel
-
-Required properties:
-  - compatible: should be one of:
-    "samsung,s6e3ha2",
-    "samsung,s6e3hf2".
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd3-supply: I/O voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-  - enable-gpios: a GPIO spec for the panel enable pin (active high)
-
-Optional properties:
-  - te-gpios: a GPIO spec for the tearing effect synchronization signal
-    gpio pin (active high)
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6e3ha2";
-               reg = <0>;
-               vdd3-supply = <&ldo27_reg>;
-               vci-supply = <&ldo28_reg>;
-               reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
-               enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
-               te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt
deleted file mode 100644 (file)
index 3f1a839..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Samsung S6E63J0X03 1.63" 320x320 AMOLED panel (interface: MIPI-DSI command mode)
-
-Required properties:
-  - compatible: "samsung,s6e63j0x03"
-  - reg: the virtual channel number of a DSI peripheral
-  - vdd3-supply: I/O voltage supply
-  - vci-supply: voltage supply for analog circuits
-  - reset-gpios: a GPIO spec for the reset pin (active low)
-  - te-gpios: a GPIO spec for the tearing effect synchronization signal
-    gpio pin (active high)
-
-Example:
-&dsi {
-       ...
-
-       panel@0 {
-               compatible = "samsung,s6e63j0x03";
-               reg = <0>;
-               vdd3-supply = <&ldo16_reg>;
-               vci-supply = <&ldo20_reg>;
-               reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
-               te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt
deleted file mode 100644 (file)
index 9fb9ebe..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-Samsung s6e63m0 AMOLED LCD panel
-
-Required properties:
-  - compatible: "samsung,s6e63m0"
-  - reset-gpios: GPIO spec for reset pin
-  - vdd3-supply: VDD regulator
-  - vci-supply: VCI regulator
-
-The panel must obey rules for SPI slave device specified in document [1].
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
-               s6e63m0: display@0 {
-                       compatible = "samsung,s6e63m0";
-                       reg = <0>;
-                       reset-gpio = <&mp05 5 1>;
-                       vdd3-supply = <&ldo12_reg>;
-                       vci-supply = <&ldo11_reg>;
-                       spi-max-frequency = <1200000>;
-
-                       port {
-                               lcd_ep: endpoint {
-                                       remote-endpoint = <&fimd_ep>;
-                               };
-                       };
-               };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
new file mode 100644 (file)
index 0000000..1dab80a
--- /dev/null
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6e63m0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung s6e63m0 AMOLED LCD panel
+
+maintainers:
+  - Jonathan Bakker <xc-racer2@live.ca>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6e63m0
+
+  reg: true
+  reset-gpios: true
+  port: true
+
+  vdd3-supply:
+    description: VDD regulator
+
+  vci-supply:
+    description: VCI regulator
+
+required:
+  - compatible
+  - reset-gpios
+  - vdd3-supply
+  - vci-supply
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        display@0 {
+            compatible = "samsung,s6e63m0";
+            reg = <0>;
+            reset-gpios = <&mp05 5 1>;
+            vdd3-supply = <&ldo12_reg>;
+            vci-supply = <&ldo11_reg>;
+            spi-max-frequency = <1200000>;
+
+            port {
+                lcd_ep: endpoint {
+                    remote-endpoint = <&fimd_ep>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt
deleted file mode 100644 (file)
index aae57ef..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
-
-Required properties:
-- compatible: should be "sii,43wvf1g".
-- "dvdd-supply": 3v3 digital regulator.
-- "avdd-supply": 5v analog regulator.
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-Example:
-
-       panel {
-               compatible = "sii,43wvf1g";
-               backlight = <&backlight_display>;
-               dvdd-supply = <&reg_lcd_3v3>;
-               avdd-supply = <&reg_lcd_5v>;
-               port {
-                       panel_in: endpoint {
-                               remote-endpoint = <&display_out>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.yaml
new file mode 100644 (file)
index 0000000..cfaa50c
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/seiko,43wvf1g.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
+
+maintainers:
+  - Marco Franchi <marco.franchi@nxp.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sii,43wvf1g
+
+  backlight: true
+  port: true
+
+  dvdd-supply:
+    description: 3v3 digital regulator
+
+  avdd-supply:
+    description: 5v analog regulator
+
+required:
+  - compatible
+  - dvdd-supply
+  - avdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    panel {
+        compatible = "sii,43wvf1g";
+
+        backlight = <&backlight_display>;
+        dvdd-supply = <&reg_lcd_3v3>;
+        avdd-supply = <&reg_lcd_5v>;
+        port {
+            panel_in: endpoint {
+                remote-endpoint = <&display_out>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt
deleted file mode 100644 (file)
index 0f57c31..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-Sharp 15" LQ150X1LG11 XGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq150x1lg11"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-Optional properties:
-- backlight: phandle of the backlight device
-- rlud-gpios: a single GPIO for the RL/UD (rotate 180 degrees) pin.
-- sellvds-gpios: a single GPIO for the SELLVDS pin.
-
-If rlud-gpios and/or sellvds-gpios are not specified, the RL/UD and/or SELLVDS
-pins are assumed to be handled appropriately by the hardware.
-
-Example:
-
-       backlight: backlight {
-               compatible = "pwm-backlight";
-               pwms = <&pwm 0 100000>;                      /* VBR */
-
-               brightness-levels = <0 20 40 60 80 100>;
-               default-brightness-level = <2>;
-
-               power-supply = <&vdd_12v_reg>;               /* VDD */
-               enable-gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;  /* XSTABY */
-       };
-
-       panel {
-               compatible = "sharp,lq150x1lg11";
-
-               power-supply = <&vcc_3v3_reg>;               /* VCC */
-
-               backlight = <&backlight>;
-               rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;    /* RL/UD */
-               sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; /* SELLVDS */
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.yaml
new file mode 100644 (file)
index 0000000..92f2d12
--- /dev/null
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,lq150x1lg11.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp 15" LQ150X1LG11 XGA TFT LCD panel
+
+maintainers:
+  - Peter Rosin <peda@axentia.se>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,lq150x1lg11
+
+  power-supply: true
+  backlight: true
+
+  rlud-gpios:
+    maxItems: 1
+    description: |
+      GPIO for the RL/UD (rotate 180 degrees) pin.
+      If rlud-gpios and/or sellvds-gpios are not specified,
+      the RL/UD and/or SELLVDS pins are assumed to be handled
+      appropriately by the hardware.
+
+  sellvds-gpios:
+    maxItems: 1
+    description: |
+      GPIO for the SELLVDS pin.
+      If rlud-gpios and/or sellvds-gpios are not specified,
+      the RL/UD and/or SELLVDS pins are assumed to be handled
+      appropriately by the hardware.
+
+required:
+  - compatible
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    panel {
+        compatible = "sharp,lq150x1lg11";
+
+        power-supply = <&vcc_3v3_reg>;               /* VCC */
+
+        backlight = <&backlight>;
+        rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;    /* RL/UD */
+        sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; /* SELLVDS */
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.txt
deleted file mode 100644 (file)
index 0cc8981..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-SHARP LS037V7DW01 TFT-LCD panel
-===================================
-
-Required properties:
-- compatible: "sharp,ls037v7dw01"
-
-Optional properties:
-- label: a symbolic name for the panel
-- enable-gpios: a GPIO spec for the optional enable pin.
-  This pin is the INI pin as specified in the LS037V7DW01.pdf file.
-- reset-gpios: a GPIO spec for the optional reset pin.
-  This pin is the RESB pin as specified in the LS037V7DW01.pdf file.
-- mode-gpios: a GPIO
-  ordered MO, LR, and UD as specified in the LS037V7DW01.pdf file.
-
-Required nodes:
-- Video port for DPI input
-
-This panel can have zero to five GPIOs to configure to change configuration
-between QVGA and VGA mode and the scan direction. As these pins can be also
-configured with external pulls, all the GPIOs are considered optional with holes
-in the array.
-
-Example
--------
-
-Example when connected to a omap2+ based device:
-
-lcd0: display {
-       compatible = "sharp,ls037v7dw01";
-       power-supply = <&lcd_3v3>;
-       enable-gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;    /* gpio152, lcd INI */
-       reset-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;     /* gpio155, lcd RESB */
-       mode-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH        /* gpio154, lcd MO */
-                     &gpio1 2 GPIO_ACTIVE_HIGH         /* gpio2, lcd LR */
-                     &gpio1 3 GPIO_ACTIVE_HIGH>;       /* gpio3, lcd UD */
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls037v7dw01.yaml
new file mode 100644 (file)
index 0000000..8c47a9b
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls037v7dw01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SHARP LS037V7DW01 TFT-LCD panel
+
+description: |
+  This panel can have zero to five GPIOs to configure to change configuration
+  between QVGA and VGA mode and the scan direction. As these pins can be also
+  configured with external pulls, all the GPIOs are considered optional with holes
+  in the array.
+
+maintainers:
+  - Tony Lindgren <tony@atomide.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,ls037v7dw01
+
+  label: true
+  enable-gpios: true
+  reset-gpios: true
+  port: true
+  power-supply: true
+
+  mode-gpios:
+    minItems: 1
+    maxItems: 3
+    description: |
+      GPIO ordered MO, LR, and UD as specified in LS037V7DW01.pdf
+      This panel can have zero to three GPIOs to configure to
+      change configuration between QVGA and VGA mode and the
+      scan direction. As these pins can be also configured
+      with external pulls, all the GPIOs are considered
+      optional with holes in the array.
+
+required:
+  - compatible
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    lcd0: display {
+        compatible = "sharp,ls037v7dw01";
+        power-supply = <&lcd_3v3>;
+        enable-gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;    /* gpio152, lcd INI */
+        reset-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;     /* gpio155, lcd RESB */
+        mode-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH        /* gpio154, lcd MO */
+                      &gpio1 2 GPIO_ACTIVE_HIGH         /* gpio2, lcd LR */
+                      &gpio1 3 GPIO_ACTIVE_HIGH>;       /* gpio3, lcd UD */
+
+        port {
+            lcd_in: endpoint {
+                remote-endpoint = <&dpi_out>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
deleted file mode 100644 (file)
index 3770a11..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Sharp Microelectronics 4.3" qHD TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,ls043t1le01-qhd"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-- reset-gpios: a GPIO spec for the reset pin
-
-Example:
-
-       mdss_dsi@fd922800 {
-               panel@0 {
-                       compatible = "sharp,ls043t1le01-qhd";
-                       reg = <0>;
-                       avdd-supply = <&pm8941_l22>;
-                       backlight = <&pm8941_wled>;
-                       reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.yaml
new file mode 100644 (file)
index 0000000..a90d0d8
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls043t1le01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 4.3" qHD TFT LCD panel
+
+maintainers:
+  - Werner Johansson <werner.johansson@sonymobile.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,ls043t1le01-qhd
+
+  reg: true
+  backlight: true
+  reset-gpios: true
+  port: true
+
+  avdd-supply:
+    description: phandle of the regulator that provides the supply voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,ls043t1le01-qhd";
+            reg = <0>;
+            avdd-supply = <&pm8941_l22>;
+            backlight = <&pm8941_wled>;
+            reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
deleted file mode 100644 (file)
index e11208f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-See panel-common.yaml in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
deleted file mode 100644 (file)
index ccd1759..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Sitronix ST7701 based LCD panels
-
-ST7701 designed for small and medium sizes of TFT LCD display, is
-capable of supporting up to 480RGBX864 in resolution. It provides
-several system interfaces like MIPI/RGB/SPI.
-
-Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has
-inbuilt ST7701 chip.
-
-Required properties:
-- compatible: must be "sitronix,st7701" and one of
-  * "techstar,ts8550b"
-- reset-gpios: a GPIO phandle for the reset pin
-
-Required properties for techstar,ts8550b:
-- reg: DSI virtual channel used by that screen
-- VCC-supply: analog regulator for MIPI circuit
-- IOVCC-supply: I/O system regulator
-
-Optional properties:
-- backlight: phandle for the backlight control.
-
-panel@0 {
-       compatible = "techstar,ts8550b", "sitronix,st7701";
-       reg = <0>;
-       VCC-supply = <&reg_dldo2>;
-       IOVCC-supply = <&reg_dldo2>;
-       reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
-       backlight = <&backlight>;
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
new file mode 100644 (file)
index 0000000..6dff59f
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sitronix,st7701.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7701 based LCD panels
+
+maintainers:
+  - Jagan Teki <jagan@amarulasolutions.com>
+
+description: |
+  ST7701 designed for small and medium sizes of TFT LCD display, is
+  capable of supporting up to 480RGBX864 in resolution. It provides
+  several system interfaces like MIPI/RGB/SPI.
+
+  Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has
+  inbuilt ST7701 chip.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - techstar,ts8550b
+      - const: sitronix,st7701
+
+  reg:
+    description: DSI virtual channel used by that screen
+    maxItems: 1
+
+  VCC-supply:
+    description: analog regulator for MIPI circuit
+
+  IOVCC-supply:
+    description: I/O system regulator
+
+  reset-gpios: true
+
+  backlight: true
+
+required:
+  - compatible
+  - reg
+  - VCC-supply
+  - IOVCC-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "techstar,ts8550b", "sitronix,st7701";
+            reg = <0>;
+            VCC-supply = <&reg_dldo2>;
+            IOVCC-supply = <&reg_dldo2>;
+            reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+            backlight = <&backlight>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
deleted file mode 100644 (file)
index c6995dd..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-Sitronix ST7789V RGB panel with SPI control bus
-
-Required properties:
-  - compatible: "sitronix,st7789v"
-  - reg: Chip select of the panel on the SPI bus
-  - reset-gpios: a GPIO phandle for the reset pin
-  - power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-  - backlight: phandle to the backlight used
-
-The generic bindings for the SPI slaves documented in [1] also applies
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [2]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
-[2]: Documentation/devicetree/bindings/graph.txt
-
-Example:
-
-panel@0 {
-       compatible = "sitronix,st7789v";
-       reg = <0>;
-       reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
-       backlight = <&pwm_bl>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       port {
-               panel_input: endpoint {
-                       remote-endpoint = <&tcon0_out_panel>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
new file mode 100644 (file)
index 0000000..fa46d15
--- /dev/null
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sitronix,st7789v.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7789V RGB panel with SPI control bus
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Maxime Ripard <mripard@kernel.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sitronix,st7789v
+
+  reg: true
+  reset-gpios: true
+  power-supply: true
+  backlight: true
+  port: true
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+  - power-supply
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sitronix,st7789v";
+            reg = <0>;
+            reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
+            backlight = <&pwm_bl>;
+            power-supply = <&power>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            port {
+                panel_input: endpoint {
+                    remote-endpoint = <&tcon0_out_panel>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.txt
deleted file mode 100644 (file)
index e123332..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Sony ACX565AKM SDI Panel
-========================
-
-Required properties:
-- compatible: "sony,acx565akm"
-
-Optional properties:
-- label: a symbolic name for the panel
-- reset-gpios: panel reset gpio
-
-Required nodes:
-- Video port for SDI input
-
-Example
--------
-
-acx565akm@2 {
-       compatible = "sony,acx565akm";
-       spi-max-frequency = <6000000>;
-       reg = <2>;
-
-       label = "lcd";
-       reset-gpios = <&gpio3 26 GPIO_ACTIVE_HIGH>; /* 90 */
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&sdi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
new file mode 100644 (file)
index 0000000..95d053c
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sony,acx565akm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony ACX565AKM SDI Panel
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sony,acx565akm
+
+  label: true
+  reset-gpios: true
+  port: true
+
+required:
+  - compatible
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@2 {
+            compatible = "sony,acx565akm";
+            spi-max-frequency = <6000000>;
+            reg = <2>;
+
+            label = "lcd";
+            reset-gpios = <&gpio3 26 GPIO_ACTIVE_HIGH>; /* 90 */
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&sdi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt b/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.txt
deleted file mode 100644 (file)
index 70cd8d1..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-Startek Electronic Technology Co. KD050C 5.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "startek,startek-kd050c"
diff --git a/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml b/Documentation/devicetree/bindings/display/panel/startek,startek-kd050c.yaml
new file mode 100644 (file)
index 0000000..fd66864
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/startek,startek-kd050c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Startek Electronic Technology Co. KD050C 5.0" WVGA TFT LCD panel
+
+maintainers:
+  - Nikita Kiryanov <nikita@compulab.co.il>
+
+allOf:
+  - $ref: panel-dpi.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: startek,startek-kd050c
+      - {} # panel-dpi, but not listed here to avoid false select
+
+  backlight: true
+  enable-gpios: true
+  height-mm: true
+  label: true
+  panel-timing: true
+  port: true
+  power-supply: true
+  reset-gpios: true
+  width-mm: true
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
new file mode 100644 (file)
index 0000000..4aa6056
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/tpo,td.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toppoly TD Panels
+
+description: |
+  The panel must obey the rules for a SPI slave device as specified in
+  spi/spi-controller.yaml
+
+maintainers:
+  - Marek Belisko <marek@goldelico.com>
+  - H. Nikolaus Schaller <hns@goldelico.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+        # Toppoly TD028TTEC1 Panel
+      - tpo,td028ttec1
+        # Toppoly TD043MTEA1 Panel
+      - tpo,td043mtea1
+
+  reg: true
+  label: true
+  reset-gpios: true
+  backlight: true
+  port: true
+
+required:
+  - compatible
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel: panel@0 {
+            compatible = "tpo,td043mtea1";
+            reg = <0>;
+            spi-max-frequency = <100000>;
+            spi-cpol;
+            spi-cpha;
+
+            label = "lcd";
+
+            reset-gpios = <&gpio7 7 0>;
+
+            port {
+                lcd_in: endpoint {
+                    remote-endpoint = <&dpi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
deleted file mode 100644 (file)
index 898e06e..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Toppoly TD028TTEC1 Panel
-========================
-
-Required properties:
-- compatible: "tpo,td028ttec1"
-
-Optional properties:
-- label: a symbolic name for the panel
-- backlight: phandle of the backlight device
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: td028ttec1@0 {
-       compatible = "tpo,td028ttec1";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-       backlight = <&backlight>;
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
-
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td043mtea1.txt
deleted file mode 100644 (file)
index ec6d629..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-TPO TD043MTEA1 Panel
-====================
-
-Required properties:
-- compatible: "tpo,td043mtea1"
-- reset-gpios: panel reset gpio
-
-Optional properties:
-- label: a symbolic name for the panel
-
-Required nodes:
-- Video port for DPI input
-
-Example
--------
-
-lcd-panel: panel@0 {
-       compatible = "tpo,td043mtea1";
-       reg = <0>;
-       spi-max-frequency = <100000>;
-       spi-cpol;
-       spi-cpha;
-
-       label = "lcd";
-
-       reset-gpios = <&gpio7 7 0>;
-
-       port {
-               lcd_in: endpoint {
-                       remote-endpoint = <&dpi_out>;
-               };
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
new file mode 100644 (file)
index 0000000..b36f39f
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,rm69299.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox model RM69299 Panels Device Tree Bindings.
+
+maintainers:
+  - Harigovindan P <harigovi@codeaurora.org>
+
+description: |
+  This binding is for display panels using a Visionox RM69299 panel.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: visionox,rm69299-1080p-display
+
+  vdda-supply:
+    description: |
+      Phandle of the regulator that provides the vdda supply voltage.
+
+  vdd3p3-supply:
+    description: |
+      Phandle of the regulator that provides the vdd3p3 supply voltage.
+
+  port: true
+  reset-gpios: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - vdda-supply
+  - vdd3p3-supply
+  - reset-gpios
+  - port
+
+examples:
+  - |
+    panel {
+        compatible = "visionox,rm69299-1080p-display";
+
+        vdda-supply = <&src_pp1800_l8c>;
+        vdd3p3-supply = <&src_pp2800_l18a>;
+
+        reset-gpios = <&pm6150l_gpio 3 0>;
+        port {
+            panel0_in: endpoint {
+                remote-endpoint = <&dsi0_out>;
+            };
+        };
+    };
+...
index eb4ae41fe41f83c73d0269146e20d55f0349b115..51cd4d1627703a154ce3318f0df9006636c1c564 100644 (file)
@@ -50,6 +50,14 @@ Required Properties:
     VSP instance that serves the DU channel, and the channel index identifies
     the LIF instance in that VSP.
 
+Optional properties:
+  - resets: A list of phandle + reset-specifier pairs, one for each entry in
+    the reset-names property.
+  - reset-names: Names of the resets. This property is model-dependent.
+    - All but R8A7779 use one reset for a group of one or more successive
+      channels. The resets must be named "du.x" with "x" being the numerical
+      index of the lowest channel in the group.
+
 Required nodes:
 
 The connections to the DU output video ports are modeled using the OF graph
@@ -96,6 +104,8 @@ Example: R8A7795 (R-Car H3) ES2.0 DU
                         <&cpg CPG_MOD 722>,
                         <&cpg CPG_MOD 721>;
                clock-names = "du.0", "du.1", "du.2", "du.3";
+               resets = <&cpg 724>, <&cpg 722>;
+               reset-names = "du.0", "du.2";
                renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
                renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
 
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt
deleted file mode 100644 (file)
index d1ad31b..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-Rockchip specific extensions for rk3066 HDMI
-============================================
-
-Required properties:
-- compatible:
-       "rockchip,rk3066-hdmi";
-- reg:
-       Physical base address and length of the controller's registers.
-- clocks, clock-names:
-       Phandle to HDMI controller clock, name should be "hclk".
-- interrupts:
-       HDMI interrupt number.
-- power-domains:
-       Phandle to the RK3066_PD_VIO power domain.
-- rockchip,grf:
-       This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
-- ports:
-       Contains one port node with two endpoints, numbered 0 and 1,
-       connected respectively to vop0 and vop1.
-       Contains one port node with one endpoint
-       connected to a hdmi-connector node.
-- pinctrl-0, pinctrl-name:
-       Switch the iomux for the HPD/I2C pins to HDMI function.
-
-Example:
-       hdmi: hdmi@10116000 {
-               compatible = "rockchip,rk3066-hdmi";
-               reg = <0x10116000 0x2000>;
-               interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru HCLK_HDMI>;
-               clock-names = "hclk";
-               power-domains = <&power RK3066_PD_VIO>;
-               rockchip,grf = <&grf>;
-               pinctrl-names = "default";
-               pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       hdmi_in: port@0 {
-                               reg = <0>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               hdmi_in_vop0: endpoint@0 {
-                                       reg = <0>;
-                                       remote-endpoint = <&vop0_out_hdmi>;
-                               };
-                               hdmi_in_vop1: endpoint@1 {
-                                       reg = <1>;
-                                       remote-endpoint = <&vop1_out_hdmi>;
-                               };
-                       };
-                       hdmi_out: port@1 {
-                               reg = <1>;
-                               hdmi_out_con: endpoint {
-                                       remote-endpoint = <&hdmi_con_in>;
-                               };
-                       };
-               };
-       };
-
-&pinctrl {
-               hdmi {
-                       hdmi_hpd: hdmi-hpd {
-                               rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
-                       };
-                       hdmii2c_xfer: hdmii2c-xfer {
-                               rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
-                                               <0 RK_PA2 1 &pcfg_pull_none>;
-                       };
-               };
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
new file mode 100644 (file)
index 0000000..4110d00
--- /dev/null
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,rk3066-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip rk3066 HDMI controller
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+  compatible:
+    const: rockchip,rk3066-hdmi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: hclk
+
+  pinctrl-0:
+    maxItems: 2
+
+  pinctrl-names:
+    const: default
+    description:
+      Switch the iomux for the HPD/I2C pins to HDMI function.
+
+  power-domains:
+    maxItems: 1
+
+  rockchip,grf:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
+
+  ports:
+    type: object
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description:
+          Port node with two endpoints, numbered 0 and 1,
+          connected respectively to vop0 and vop1.
+
+      port@1:
+        type: object
+        description:
+          Port node with one endpoint connected to a hdmi-connector node.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - pinctrl-0
+  - pinctrl-names
+  - power-domains
+  - rockchip,grf
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3066a-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/pinctrl/rockchip.h>
+    #include <dt-bindings/power/rk3066-power.h>
+    hdmi: hdmi@10116000 {
+      compatible = "rockchip,rk3066-hdmi";
+      reg = <0x10116000 0x2000>;
+      interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru HCLK_HDMI>;
+      clock-names = "hclk";
+      pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+      pinctrl-names = "default";
+      power-domains = <&power RK3066_PD_VIO>;
+      rockchip,grf = <&grf>;
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        hdmi_in: port@0 {
+          reg = <0>;
+          #address-cells = <1>;
+          #size-cells = <0>;
+          hdmi_in_vop0: endpoint@0 {
+            reg = <0>;
+            remote-endpoint = <&vop0_out_hdmi>;
+          };
+          hdmi_in_vop1: endpoint@1 {
+            reg = <1>;
+            remote-endpoint = <&vop1_out_hdmi>;
+          };
+        };
+        hdmi_out: port@1 {
+          reg = <1>;
+          hdmi_out_con: endpoint {
+            remote-endpoint = <&hdmi_con_in>;
+          };
+        };
+      };
+    };
+
+    pinctrl {
+      hdmi {
+        hdmi_hpd: hdmi-hpd {
+          rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
+        };
+        hdmii2c_xfer: hdmii2c-xfer {
+          rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
+                          <0 RK_PA2 1 &pcfg_pull_none>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
deleted file mode 100644 (file)
index 8b3a5f5..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-device-tree bindings for rockchip soc display controller (vop)
-
-VOP (Visual Output Processor) is the Display Controller for the Rockchip
-series of SoCs which transfers the image data from a video memory
-buffer to an external LCD interface.
-
-Required properties:
-- compatible: value should be one of the following
-               "rockchip,rk3036-vop";
-               "rockchip,rk3126-vop";
-               "rockchip,px30-vop-lit";
-               "rockchip,px30-vop-big";
-               "rockchip,rk3066-vop";
-               "rockchip,rk3188-vop";
-               "rockchip,rk3288-vop";
-               "rockchip,rk3368-vop";
-               "rockchip,rk3366-vop";
-               "rockchip,rk3399-vop-big";
-               "rockchip,rk3399-vop-lit";
-               "rockchip,rk3228-vop";
-               "rockchip,rk3328-vop";
-
-- reg: Must contain one entry corresponding to the base address and length
-       of the register space. Can optionally contain a second entry
-       corresponding to the CRTC gamma LUT address.
-
-- interrupts: should contain a list of all VOP IP block interrupts in the
-                order: VSYNC, LCD_SYSTEM. The interrupt specifier
-                format depends on the interrupt controller used.
-
-- clocks: must include clock specifiers corresponding to entries in the
-               clock-names property.
-
-- clock-names: Must contain
-               aclk_vop: for ddr buffer transfer.
-               hclk_vop: for ahb bus to R/W the phy regs.
-               dclk_vop: pixel clock.
-
-- resets: Must contain an entry for each entry in reset-names.
-  See ../reset/reset.txt for details.
-- reset-names: Must include the following entries:
-  - axi
-  - ahb
-  - dclk
-
-- iommus: required a iommu node
-
-- port: A port node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-SoC specific DT entry:
-       vopb: vopb@ff930000 {
-               compatible = "rockchip,rk3288-vop";
-               reg = <0x0 0xff930000 0x0 0x19c>, <0x0 0xff931000 0x0 0x1000>;
-               interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
-               clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
-               resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
-               reset-names = "axi", "ahb", "dclk";
-               iommus = <&vopb_mmu>;
-               vopb_out: port {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       vopb_out_edp: endpoint@0 {
-                               reg = <0>;
-                               remote-endpoint=<&edp_in_vopb>;
-                       };
-                       vopb_out_hdmi: endpoint@1 {
-                               reg = <1>;
-                               remote-endpoint=<&hdmi_in_vopb>;
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
new file mode 100644 (file)
index 0000000..1695e3e
--- /dev/null
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip-vop.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC display controller (VOP)
+
+description:
+  VOP (Video Output Processor) is the display controller for the Rockchip
+  series of SoCs which transfers the image data from a video memory
+  buffer to an external LCD interface.
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+  compatible:
+    enum:
+      - rockchip,px30-vop-big
+      - rockchip,px30-vop-lit
+      - rockchip,rk3036-vop
+      - rockchip,rk3066-vop
+      - rockchip,rk3126-vop
+      - rockchip,rk3188-vop
+      - rockchip,rk3228-vop
+      - rockchip,rk3288-vop
+      - rockchip,rk3328-vop
+      - rockchip,rk3366-vop
+      - rockchip,rk3368-vop
+      - rockchip,rk3399-vop-big
+      - rockchip,rk3399-vop-lit
+
+  reg:
+    minItems: 1
+    items:
+      - description:
+          Must contain one entry corresponding to the base address and length
+          of the register space.
+      - description:
+          Can optionally contain a second entry corresponding to
+          the CRTC gamma LUT address.
+
+  interrupts:
+    maxItems: 1
+    description:
+      The VOP interrupt is shared by several interrupt sources, such as
+      frame start (VSYNC), line flag and other status interrupts.
+
+  clocks:
+    items:
+      - description: Clock for ddr buffer transfer.
+      - description: Pixel clock.
+      - description: Clock for the ahb bus to R/W the phy regs.
+
+  clock-names:
+    items:
+      - const: aclk_vop
+      - const: dclk_vop
+      - const: hclk_vop
+
+  resets:
+    maxItems: 3
+
+  reset-names:
+    items:
+      - const: axi
+      - const: ahb
+      - const: dclk
+
+  port:
+    type: object
+    description:
+      A port node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+  assigned-clocks:
+    maxItems: 2
+
+  assigned-clock-rates:
+    maxItems: 2
+
+  iommus:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3288-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/rk3288-power.h>
+    vopb: vopb@ff930000 {
+      compatible = "rockchip,rk3288-vop";
+      reg = <0x0 0xff930000 0x0 0x19c>,
+            <0x0 0xff931000 0x0 0x1000>;
+      interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru ACLK_VOP0>,
+               <&cru DCLK_VOP0>,
+               <&cru HCLK_VOP0>;
+      clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+      power-domains = <&power RK3288_PD_VIO>;
+      resets = <&cru SRST_LCDC1_AXI>,
+               <&cru SRST_LCDC1_AHB>,
+               <&cru SRST_LCDC1_DCLK>;
+      reset-names = "axi", "ahb", "dclk";
+      iommus = <&vopb_mmu>;
+      vopb_out: port {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        vopb_out_edp: endpoint@0 {
+          reg = <0>;
+          remote-endpoint=<&edp_in_vopb>;
+        };
+        vopb_out_hdmi: endpoint@1 {
+          reg = <1>;
+          remote-endpoint=<&hdmi_in_vopb>;
+        };
+      };
+    };
index d3277fe6640bd8b0c7bc50d2f3da493172edf07d..997934c58f9a3d2672265ecd1695084c47d81cc4 100644 (file)
@@ -187,6 +187,8 @@ patternProperties:
     description: ChipOne
   "^chipspark,.*":
     description: ChipSPARK
+  "^chrontel,.*":
+    description: Chrontel, Inc.
   "^chrp,.*":
     description: Common Hardware Reference Platform
   "^chunghwa,.*":
@@ -463,6 +465,8 @@ patternProperties:
     description: Infineon Technologies
   "^inforce,.*":
     description: Inforce Computing
+  "^ivo,.*":
+    description: InfoVision Optoelectronics Kunshan Co. Ltd.
   "^ingenic,.*":
     description: Ingenic Semiconductor
   "^innolux,.*":
@@ -488,7 +492,7 @@ patternProperties:
   "^issi,.*":
     description: Integrated Silicon Solutions Inc.
   "^ite,.*":
-    description: ITE Tech, Inc.
+    description: ITE Tech. Inc.
   "^itead,.*":
     description: ITEAD Intelligent Systems Co.Ltd
   "^iwave,.*":
@@ -1041,6 +1045,8 @@ patternProperties:
     description: Tronsmart
   "^truly,.*":
     description: Truly Semiconductors Limited
+  "^visionox,.*":
+    description: Visionox
   "^tsd,.*":
     description: Theobroma Systems Design und Consulting GmbH
   "^tyan,.*":
index 0efede580039cdcbb074983521731f59d3a7b946..4cc74325bf91417d5523de7a7e23b5477325aaf2 100644 (file)
@@ -202,3 +202,91 @@ busy_percent
 
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
    :doc: busy_percent
+
+GPU Product Information
+=======================
+
+Information about the GPU can be obtained on certain cards
+via sysfs.
+
+product_name
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_name
+
+product_number
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_number
+
+serial_number
+-------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: serial_number
+
+unique_id
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: unique_id
+
+GPU Memory Usage Information
+============================
+
+Various memory accounting can be accessed via sysfs.
+
+mem_info_vram_total
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_total
+
+mem_info_vram_used
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_used
+
+mem_info_vis_vram_total
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_total
+
+mem_info_vis_vram_used
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_used
+
+mem_info_gtt_total
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_total
+
+mem_info_gtt_used
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_used
+
+PCIe Accounting Information
+===========================
+
+pcie_bw
+-------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pcie_bw
+
+pcie_replay_count
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: pcie_replay_count
+
+
index a73320576ca9a18e71705694dd3c833c83b62a47..12272b168580be9aa87e86644c3d94770351a2b3 100644 (file)
@@ -132,6 +132,18 @@ be unmapped; on many devices, the ROM address decoder is shared with
 other BARs, so leaving it mapped could cause undesired behaviour like
 hangs or memory corruption.
 
+Managed Resources
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_managed.c
+   :doc: managed resources
+
+.. kernel-doc:: drivers/gpu/drm/drm_managed.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_managed.h
+   :internal:
+
 Bus-specific Device Registration and PCI Support
 ------------------------------------------------
 
index 906771e03103240fd6fa2051631ff45ce7358ef7..397314d08f77f573de8922fad53d5ab2425d836c 100644 (file)
@@ -3,7 +3,7 @@ Kernel Mode Setting (KMS)
 =========================
 
 Drivers must initialize the mode setting core by calling
-drm_mode_config_init() on the DRM device. The function
+drmm_mode_config_init() on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
 mode_config field and never fails. Once done, mode configuration must
 be setup by initializing the following fields.
@@ -397,6 +397,9 @@ Connector Functions Reference
 Writeback Connectors
 --------------------
 
+.. kernel-doc:: include/drm/drm_writeback.h
+  :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_writeback.c
   :doc: overview
 
index c77b326012606413493c76efd52d7622eebba296..1839762044be18f7583b2d28955a375d814347d6 100644 (file)
@@ -373,15 +373,6 @@ GEM CMA Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
    :export:
 
-VRAM Helper Function Reference
-==============================
-
-.. kernel-doc:: drivers/gpu/drm/drm_vram_helper_common.c
-   :doc: overview
-
-.. kernel-doc:: include/drm/drm_gem_vram_helper.h
-   :internal:
-
 GEM VRAM Helper Functions Reference
 -----------------------------------
 
index f6d363b6756e9e5a6432954f7d94278b986eb85a..33cc6ddf8f645ffb923dd84ef5c6234311234752 100644 (file)
@@ -329,6 +329,52 @@ for execution also include a list of all locations within buffers that
 refer to GPU-addresses so that the kernel can edit the buffer correctly.
 This process is dubbed relocation.
 
+Locking Guidelines
+------------------
+
+.. note::
+   This is a description of how the locking should be after
+   refactoring is done. Does not necessarily reflect what the locking
+   looks like while WIP.
+
+#. All locking rules and interface contracts with cross-driver interfaces
+   (dma-buf, dma_fence) need to be followed.
+
+#. No struct_mutex anywhere in the code
+
+#. dma_resv will be the outermost lock (when needed) and ww_acquire_ctx
+   is to be hoisted at highest level and passed down within i915_gem_ctx
+   in the call chain
+
+#. While holding lru/memory manager (buddy, drm_mm, whatever) locks
+   system memory allocations are not allowed
+
+       * Enforce this by priming lockdep (with fs_reclaim). If we
+         allocate memory while holding these locks we get a rehash
+         of the shrinker vs. struct_mutex saga, and that would be
+         real bad.
+
+#. Do not nest different lru/memory manager locks within each other.
+   Take them in turn to update memory allocations, relying on the object’s
+   dma_resv ww_mutex to serialize against other operations.
+
+#. The suggestion for lru/memory manager locks is that they are small
+   enough to be spinlocks.
+
+#. All features need to come with exhaustive kernel selftests and/or
+   IGT tests when appropriate
+
+#. All LMEM uAPI paths need to be fully restartable (_interruptible()
+   for all locks/waits/sleeps)
+
+       * Error handling validation through signal injection.
+         Still the best strategy we have for validating GEM uAPI
+         corner cases.
+         Must be excessively used in the IGT, and we need to check
+         that we really have full path coverage of all error cases.
+
+       * -EDEADLK handling with ww_mutex
+
 GEM BO Management Implementation Details
 ----------------------------------------
 
@@ -391,19 +437,19 @@ Global GTT views
 GTT Fences and Swizzling
 ------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
    :internal:
 
 Global GTT Fence Handling
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
    :doc: fence register handling
 
 Hardware Tiling and Swizzling Details
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
    :doc: tiling swizzling details
 
 Object Tiling IOCTLs
index 439656f55c5da8ec957f2da313d1ca430bd59714..658b52f7ffc6c3c667987a465810b059f1ed733d 100644 (file)
@@ -347,18 +347,6 @@ Contact: Sean Paul
 
 Level: Starter
 
-Remove drm_display_mode.hsync
------------------------------
-
-We have drm_mode_hsync() to calculate this from hsync_start/end, since drivers
-shouldn't/don't use this, remove this member to avoid any temptations to use it
-in the future. If there is any debug code using drm_display_mode.hsync, convert
-it to use drm_mode_hsync() instead.
-
-Contact: Sean Paul
-
-Level: Starter
-
 connector register/unregister fixes
 -----------------------------------
 
index ef7414d7de3f0dd88c7fed9d570a1d2b08a48824..a439da570dc3586c40880b3a92e2f66b5803a800 100644 (file)
@@ -5062,7 +5062,7 @@ F:        drivers/dma-buf/
 F:     include/linux/*fence.h
 F:     include/linux/dma-buf*
 F:     include/linux/dma-resv.h
-K:     dma_(buf|fence|resv)
+K:     \bdma_(?:buf|fence|resv)\b
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vkoul@kernel.org>
@@ -5274,7 +5274,7 @@ DRM DRIVER FOR ARM VERSATILE TFT PANELS
 M:     Linus Walleij <linus.walleij@linaro.org>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt
+F:     Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
 F:     drivers/gpu/drm/panel/panel-arm-versatile.c
 
 DRM DRIVER FOR ASPEED BMC GFX
@@ -5300,7 +5300,7 @@ F:        drivers/gpu/drm/bochs/
 DRM DRIVER FOR BOE HIMAX8279D PANELS
 M:     Jerry Han <hanxu5@huaqin.corp-partner.google.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
+F:     Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
 F:     drivers/gpu/drm/panel/panel-boe-himax8279d.c
 
 DRM DRIVER FOR FARADAY TVE200 TV ENCODER
@@ -5318,7 +5318,7 @@ F:        drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
 DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
 M:     Jagan Teki <jagan@amarulasolutions.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
+F:     Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
 F:     drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
 
 DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
@@ -5353,6 +5353,14 @@ S:       Orphan / Obsolete
 F:     drivers/gpu/drm/i810/
 F:     include/uapi/drm/i810_drm.h
 
+DRM DRIVER FOR LVDS PANELS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+S:     Maintained
+F:     drivers/gpu/drm/panel/panel-lvds.c
+F:     Documentation/devicetree/bindings/display/panel/lvds.yaml
+
 DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
 S:     Orphan / Obsolete
 F:     drivers/gpu/drm/mga/
@@ -5401,7 +5409,7 @@ F:        include/uapi/drm/nouveau_drm.h
 DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
 M:     Stefan Mavrodiev <stefan@olimex.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
+F:     Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
 F:     drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
 
 DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
@@ -5418,7 +5426,7 @@ L:        virtualization@lists.linux-foundation.org
 S:     Obsolete
 W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     drivers/gpu/drm/cirrus/
+F:     drivers/gpu/drm/tiny/cirrus.c
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
@@ -5468,7 +5476,7 @@ F:        drivers/gpu/drm/tiny/st7586.c
 DRM DRIVER FOR SITRONIX ST7701 PANELS
 M:     Jagan Teki <jagan@amarulasolutions.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
+F:     Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
 F:     drivers/gpu/drm/panel/panel-sitronix-st7701.c
 
 DRM DRIVER FOR SITRONIX ST7735R PANELS
@@ -14141,7 +14149,6 @@ F:      drivers/net/wireless/quantenna
 RADEON and AMDGPU DRM DRIVERS
 M:     Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
-M:     David (ChunMing) Zhou <David1.Zhou@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git git://people.freedesktop.org/~agd5f/linux
index 5bc82e2671c6e69944d11f8d55446873e442a274..351f891b4842e4d0453b047adb8d98995f2a7081 100644 (file)
@@ -104,6 +104,14 @@ static struct fixed_voltage_config shannon_cf_vcc_pdata __initdata = {
        .enabled_at_boot = 1,
 };
 
+static struct gpiod_lookup_table shannon_display_gpio_table = {
+       .dev_id = "sa11x0-fb",
+       .table = {
+               GPIO_LOOKUP("gpio", 22, "shannon-lcden", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init shannon_init(void)
 {
        sa11x0_register_fixed_regulator(0, &shannon_cf_vcc_pdata,
@@ -113,6 +121,7 @@ static void __init shannon_init(void)
        sa11x0_register_pcmcia(0, &shannon_pcmcia0_gpio_table);
        sa11x0_register_pcmcia(1, &shannon_pcmcia1_gpio_table);
        sa11x0_ppc_configure_mcp();
+       gpiod_add_lookup_table(&shannon_display_gpio_table);
        sa11x0_register_lcd(&shannon_lcd_info);
        sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1);
        sa11x0_register_mcp(&shannon_mcp_data);
index 66a62d17a3f51ce80bd7220d2110b1225e341a94..4b34a5195c653dc8a7757bc861fcae9eb45a57c5 100644 (file)
@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
                           unsigned int flags)
 {
        intel_private.driver->write_entry(addr, pg, flags);
+       readl(intel_private.gtt + pg);
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
 }
@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
                        j++;
                }
        }
-       wmb();
+       readl(intel_private.gtt + j - 1);
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
 }
@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
 
 static void i9xx_chipset_flush(void)
 {
+       wmb();
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
 }
@@ -1405,13 +1407,16 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
        dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
-       mask = intel_private.driver->dma_mask_size;
-       if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-               dev_err(&intel_private.pcidev->dev,
-                       "set gfx device dma mask %d-bit failed!\n", mask);
-       else
-               pci_set_consistent_dma_mask(intel_private.pcidev,
-                                           DMA_BIT_MASK(mask));
+       if (bridge) {
+               mask = intel_private.driver->dma_mask_size;
+               if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+                       dev_err(&intel_private.pcidev->dev,
+                               "set gfx device dma mask %d-bit failed!\n",
+                               mask);
+               else
+                       pci_set_consistent_dma_mask(intel_private.pcidev,
+                                                   DMA_BIT_MASK(mask));
+       }
 
        if (intel_gtt_init() != 0) {
                intel_gmch_remove();
index 9c190026bfab1f9b34169206a6034a5b63384603..995e05f609ff0b549026cb33d465e210aa174f24 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_UDMABUF)           += udmabuf.o
 
 dmabuf_selftests-y := \
        selftest.o \
-       st-dma-fence.o
+       st-dma-fence.o \
+       st-dma-fence-chain.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
index 07df88f2e3057e0c9def11f24b3d430823d30350..01ce125f8e8d8a864b82657f095923da4cfa2715 100644 (file)
@@ -691,6 +691,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 
        attach->dev = dev;
        attach->dmabuf = dmabuf;
+       if (importer_ops)
+               attach->peer2peer = importer_ops->allow_peer2peer;
        attach->importer_ops = importer_ops;
        attach->importer_priv = importer_priv;
 
index 44a741677d2524c88d6a3cc056903a4bb31487bc..c435bbba851c774727b152c2eed875aab56e78d6 100644 (file)
@@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
                        replacement = NULL;
                }
 
-               tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+               tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
+                             prev, replacement);
                if (tmp == prev)
                        dma_fence_put(tmp);
                else
@@ -98,6 +99,12 @@ int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
                return -EINVAL;
 
        dma_fence_chain_for_each(*pfence, &chain->base) {
+               if ((*pfence)->seqno < seqno) { /* already signaled */
+                       dma_fence_put(*pfence);
+                       *pfence = NULL;
+                       break;
+               }
+
                if ((*pfence)->context != chain->base.context ||
                    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
                        break;
@@ -221,6 +228,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
  * @chain: the chain node to initialize
  * @prev: the previous fence
  * @fence: the current fence
+ * @seqno: the sequence number (syncpt) of the fence within the chain
  *
  * Initialize a new chain node and either start a new chain or add the node to
  * the existing chain of the previous fence.
index 052a41e2451c182068164816d35c14580c789386..90edf2b281b049381ebfc97d467b15e207a16b83 100644 (file)
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(dma_fence_get_stub);
 u64 dma_fence_context_alloc(unsigned num)
 {
        WARN_ON(!num);
-       return atomic64_add_return(num, &dma_fence_context_counter) - num;
+       return atomic64_fetch_add(num, &dma_fence_context_counter);
 }
 EXPORT_SYMBOL(dma_fence_context_alloc);
 
index 5320386f02e5f3f721f8e2e551475bcd6078173c..55918ef9adab25a33d3ca7f56186ac27729af317 100644 (file)
@@ -11,3 +11,4 @@
  */
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644 (file)
index 0000000..5d45ba7
--- /dev/null
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static struct kmem_cache *slab_fences;
+
+static inline struct mock_fence {
+       struct dma_fence base;
+       spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+       return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+       return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+       kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+       .get_driver_name = mock_name,
+       .get_timeline_name = mock_name,
+       .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+       struct mock_fence *f;
+
+       f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       spin_lock_init(&f->lock);
+       dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+       return &f->base;
+}
+
+static inline struct mock_chain {
+       struct dma_fence_chain base;
+} *to_mock_chain(struct dma_fence *f) {
+       return container_of(f, struct mock_chain, base.base);
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+                                   struct dma_fence *fence,
+                                   u64 seqno)
+{
+       struct mock_chain *f;
+
+       f = kmalloc(sizeof(*f), GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       dma_fence_chain_init(&f->base,
+                            dma_fence_get(prev),
+                            dma_fence_get(fence),
+                            seqno);
+
+       return &f->base.base;
+}
+
+static int sanitycheck(void *arg)
+{
+       struct dma_fence *f, *chain;
+       int err = 0;
+
+       f = mock_fence();
+       if (!f)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, f, 1);
+       if (!chain)
+               err = -ENOMEM;
+
+       dma_fence_signal(f);
+       dma_fence_put(f);
+
+       dma_fence_put(chain);
+
+       return err;
+}
+
+struct fence_chains {
+       unsigned int chain_length;
+       struct dma_fence **fences;
+       struct dma_fence **chains;
+
+       struct dma_fence *tail;
+};
+
+static uint64_t seqno_inc(unsigned int i)
+{
+       return i + 1;
+}
+
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+                            uint64_t (*seqno_fn)(unsigned int))
+{
+       unsigned int i;
+       int err = 0;
+
+       fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!fc->chains)
+               return -ENOMEM;
+
+       fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!fc->fences) {
+               err = -ENOMEM;
+               goto err_chains;
+       }
+
+       fc->tail = NULL;
+       for (i = 0; i < count; i++) {
+               fc->fences[i] = mock_fence();
+               if (!fc->fences[i]) {
+                       err = -ENOMEM;
+                       goto unwind;
+               }
+
+               fc->chains[i] = mock_chain(fc->tail,
+                                          fc->fences[i],
+                                          seqno_fn(i));
+               if (!fc->chains[i]) {
+                       err = -ENOMEM;
+                       goto unwind;
+               }
+
+               fc->tail = fc->chains[i];
+       }
+
+       fc->chain_length = i;
+       return 0;
+
+unwind:
+       for (i = 0; i < count; i++) {
+               dma_fence_put(fc->fences[i]);
+               dma_fence_put(fc->chains[i]);
+       }
+       kvfree(fc->fences);
+err_chains:
+       kvfree(fc->chains);
+       return err;
+}
+
+static void fence_chains_fini(struct fence_chains *fc)
+{
+       unsigned int i;
+
+       for (i = 0; i < fc->chain_length; i++) {
+               dma_fence_signal(fc->fences[i]);
+               dma_fence_put(fc->fences[i]);
+       }
+       kvfree(fc->fences);
+
+       for (i = 0; i < fc->chain_length; i++)
+               dma_fence_put(fc->chains[i]);
+       kvfree(fc->chains);
+}
+
+static int find_seqno(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 0);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno(0)!\n", err);
+               goto err;
+       }
+
+       for (i = 0; i < fc.chain_length; i++) {
+               fence = dma_fence_get(fc.tail);
+               err = dma_fence_chain_find_seqno(&fence, i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Reported %d for find_seqno(%d:%d)!\n",
+                              err, fc.chain_length + 1, i + 1);
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+                              fc.chain_length + 1, i + 1);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for finding self\n");
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find self\n");
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i + 2);
+               dma_fence_put(fence);
+               if (!err) {
+                       pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+                              i + 1, i + 2);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, i);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for previous fence!\n");
+                       goto err;
+               }
+               if (i > 0 && fence != fc.chains[i - 1]) {
+                       pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+                              i + 1, i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+static int find_signaled(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+
+       err = fence_chains_init(&fc, 2, seqno_inc);
+       if (err)
+               return err;
+
+       dma_fence_signal(fc.fences[0]);
+
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 1);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno()!\n", err);
+               goto err;
+       }
+
+       if (fence && fence != fc.chains[0]) {
+               pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+                      fence->seqno);
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 1);
+               dma_fence_put(fence);
+               if (err)
+                       pr_err("Reported %d for finding self!\n", err);
+
+               err = -EINVAL;
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+static int find_out_of_order(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+
+       err = fence_chains_init(&fc, 3, seqno_inc);
+       if (err)
+               return err;
+
+       dma_fence_signal(fc.fences[1]);
+
+       fence = dma_fence_get(fc.tail);
+       err = dma_fence_chain_find_seqno(&fence, 2);
+       dma_fence_put(fence);
+       if (err) {
+               pr_err("Reported %d for find_seqno()!\n", err);
+               goto err;
+       }
+
+       if (fence && fence != fc.chains[1]) {
+               pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+                      fence->seqno);
+
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 2);
+               dma_fence_put(fence);
+               if (err)
+                       pr_err("Reported %d for finding self!\n", err);
+
+               err = -EINVAL;
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+static uint64_t seqno_inc2(unsigned int i)
+{
+       return 2 * i + 2;
+}
+
+/*
+ * Build a chain holding only even seqnos (2, 4, ..., via seqno_inc2) and
+ * check that looking up a missing odd seqno (2i + 1) rounds up to the
+ * next newer link, chains[i], while looking up a link's own seqno
+ * (2i + 2) returns that link itself.
+ */
+static int find_gap(void *arg)
+{
+       struct fence_chains fc;
+       struct dma_fence *fence;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc2);
+       if (err)
+               return err;
+
+       for (i = 0; i < fc.chain_length; i++) {
+               /* 2i + 1 is absent from the chain; expect chains[i]. */
+               fence = dma_fence_get(fc.tail);
+               err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Reported %d for find_seqno(%d:%d)!\n",
+                              err, fc.chain_length + 1, 2 * i + 1);
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+                              fence->seqno,
+                              fc.chain_length + 1,
+                              2 * i + 1);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* Find-self: 2i + 2 is this link's own seqno. */
+               dma_fence_get(fence);
+               err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+               dma_fence_put(fence);
+               if (err) {
+                       pr_err("Error reported for finding self\n");
+                       goto err;
+               }
+               if (fence != fc.chains[i]) {
+                       pr_err("Incorrect fence reported by find self\n");
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/* Shared state handed to each __find_race() worker thread. */
+struct find_race {
+       struct fence_chains fc;         /* the chain under test */
+       atomic_t children;              /* count of still-running workers */
+};
+
+/*
+ * Worker thread body: repeatedly look up a random seqno in the chain
+ * while other workers concurrently signal random fences, checking that
+ * dma_fence_chain_find_seqno() never returns a link older than the
+ * requested seqno. Runs until kthread_stop().
+ */
+static int __find_race(void *arg)
+{
+       struct find_race *data = arg;
+       int err = 0;
+
+       while (!kthread_should_stop()) {
+               struct dma_fence *fence = dma_fence_get(data->fc.tail);
+               int seqno;
+
+               /* Pick a target seqno in [1, chain_length]. */
+               seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+               err = dma_fence_chain_find_seqno(&fence, seqno);
+               if (err) {
+                       pr_err("Failed to find fence seqno:%d\n",
+                              seqno);
+                       dma_fence_put(fence);
+                       break;
+               }
+               /* NULL means the whole chain already signaled past seqno. */
+               if (!fence)
+                       goto signal;
+
+               /* Find-self on the returned link must also succeed. */
+               err = dma_fence_chain_find_seqno(&fence, seqno);
+               if (err) {
+                       pr_err("Reported an invalid fence for find-self:%d\n",
+                              seqno);
+                       dma_fence_put(fence);
+                       break;
+               }
+
+               /* The result may be newer than asked for, never older. */
+               if (fence->seqno < seqno) {
+                       pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n",
+                              fence->seqno, seqno);
+                       err = -EINVAL;
+                       dma_fence_put(fence);
+                       break;
+               }
+
+               dma_fence_put(fence);
+
+signal:
+               /*
+                * Signal a random fence, excluding the last one
+                * (range is [0, chain_length - 2]) so the chain tail
+                * never fully completes while workers are running.
+                */
+               seqno = prandom_u32_max(data->fc.chain_length - 1);
+               dma_fence_signal(data->fc.fences[seqno]);
+               cond_resched();
+       }
+
+       /* Wake find_race() once the last worker drops out. */
+       if (atomic_dec_and_test(&data->children))
+               wake_up_var(&data->children);
+       return err;
+}
+
+/*
+ * Stress test: spawn one __find_race() worker per online CPU, let them
+ * race lookups against signaling for up to 5 seconds, then report how
+ * many fences got signaled. Returns the first worker error, if any.
+ */
+static int find_race(void *arg)
+{
+       struct find_race data;
+       int ncpus = num_online_cpus();
+       struct task_struct **threads;
+       unsigned long count;
+       int err;
+       int i;
+
+       err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+       if (!threads) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       atomic_set(&data.children, 0);
+       for (i = 0; i < ncpus; i++) {
+               threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+               if (IS_ERR(threads[i])) {
+                       /* Run with however many threads we managed to start. */
+                       ncpus = i;
+                       break;
+               }
+               atomic_inc(&data.children);
+               get_task_struct(threads[i]);
+       }
+
+       /* Bounded run: workers exit early only on error. */
+       wait_var_event_timeout(&data.children,
+                              !atomic_read(&data.children),
+                              5 * HZ);
+
+       for (i = 0; i < ncpus; i++) {
+               int ret;
+
+               /* Keep the first error; later ones are likely cascades. */
+               ret = kthread_stop(threads[i]);
+               if (ret && !err)
+                       err = ret;
+               put_task_struct(threads[i]);
+       }
+       kfree(threads);
+
+       count = 0;
+       for (i = 0; i < data.fc.chain_length; i++)
+               if (dma_fence_is_signaled(data.fc.fences[i]))
+                       count++;
+       pr_info("Completed %lu cycles\n", count);
+
+err:
+       fence_chains_fini(&data.fc);
+       return err;
+}
+
+/*
+ * Signal the underlying fences in order and check that each chain link
+ * becomes signaled exactly when its fence does -- never before its
+ * successor's fence has been signaled.
+ */
+static int signal_forward(void *arg)
+{
+       struct fence_chains fc;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       for (i = 0; i < fc.chain_length; i++) {
+               dma_fence_signal(fc.fences[i]);
+
+               if (!dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] not signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               /* The next link must still be pending at this point. */
+               if (i + 1 < fc.chain_length &&
+                   dma_fence_is_signaled(fc.chains[i + 1])) {
+                       pr_err("chain[%d] is signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Signal the underlying fences in reverse order: no link (other than
+ * the first) may report signaled until every earlier fence has also
+ * been signaled; afterwards the whole chain must read as signaled.
+ */
+static int signal_backward(void *arg)
+{
+       struct fence_chains fc;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, 64, seqno_inc);
+       if (err)
+               return err;
+
+       /* Walk backwards: links with unsignaled predecessors stay pending. */
+       for (i = fc.chain_length; i--; ) {
+               dma_fence_signal(fc.fences[i]);
+
+               if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] is signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       /* Everything is signaled now; every link must agree. */
+       for (i = 0; i < fc.chain_length; i++) {
+               if (!dma_fence_is_signaled(fc.chains[i])) {
+                       pr_err("chain[%d] was not signaled!\n", i);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Thread body for the wait_* tests: block (uninterruptibly) on the
+ * chain tail; -EIO if the wait itself fails.
+ */
+static int __wait_fence_chains(void *arg)
+{
+       struct fence_chains *fc = arg;
+
+       if (dma_fence_wait(fc->tail, false))
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * Start a thread waiting on the chain tail, then signal all fences in
+ * order; the waiter must complete (kthread_stop() returns its result).
+ */
+static int wait_forward(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       /* Give the waiter a chance to go to sleep on the tail first. */
+       yield_to(tsk, true);
+
+       for (i = 0; i < fc.chain_length; i++)
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Same as wait_forward() but the fences are signaled in reverse order,
+ * so the waiter only completes once the very first fence signals.
+ */
+static int wait_backward(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       /* Give the waiter a chance to go to sleep on the tail first. */
+       yield_to(tsk, true);
+
+       for (i = fc.chain_length; i--; )
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Shuffle the fences[] array in place (the chain links themselves are
+ * untouched) so the wait_random test signals in random order.
+ */
+static void randomise_fences(struct fence_chains *fc)
+{
+       unsigned int count = fc->chain_length;
+
+       /* Fisher-Yates shuffle courtesy of Knuth */
+       while (--count) {
+               unsigned int swp;
+
+               /* Pick a partner in [0, count]; count == self is a no-op. */
+               swp = prandom_u32_max(count + 1);
+               if (swp == count)
+                       continue;
+
+               swap(fc->fences[count], fc->fences[swp]);
+       }
+}
+
+/*
+ * Same as wait_forward() but with the signaling order randomised via
+ * randomise_fences(); the waiter must still complete once all fences
+ * have been signaled.
+ */
+static int wait_random(void *arg)
+{
+       struct fence_chains fc;
+       struct task_struct *tsk;
+       int err;
+       int i;
+
+       err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+       if (err)
+               return err;
+
+       randomise_fences(&fc);
+
+       tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               goto err;
+       }
+       get_task_struct(tsk);
+       /* Give the waiter a chance to go to sleep on the tail first. */
+       yield_to(tsk, true);
+
+       for (i = 0; i < fc.chain_length; i++)
+               dma_fence_signal(fc.fences[i]);
+
+       err = kthread_stop(tsk);
+       put_task_struct(tsk);
+
+err:
+       fence_chains_fini(&fc);
+       return err;
+}
+
+/*
+ * Selftest entry point: run every dma-fence-chain subtest against a
+ * dedicated mock-fence slab. The slab uses SLAB_TYPESAFE_BY_RCU, so
+ * freed fences may still be dereferenced under RCU, as in real use.
+ */
+int dma_fence_chain(void)
+{
+       static const struct subtest tests[] = {
+               SUBTEST(sanitycheck),
+               SUBTEST(find_seqno),
+               SUBTEST(find_signaled),
+               SUBTEST(find_out_of_order),
+               SUBTEST(find_gap),
+               SUBTEST(find_race),
+               SUBTEST(signal_forward),
+               SUBTEST(signal_backward),
+               SUBTEST(wait_forward),
+               SUBTEST(wait_backward),
+               SUBTEST(wait_random),
+       };
+       int ret;
+
+       pr_info("sizeof(dma_fence_chain)=%zu\n",
+               sizeof(struct dma_fence_chain));
+
+       slab_fences = KMEM_CACHE(mock_fence,
+                                SLAB_TYPESAFE_BY_RCU |
+                                SLAB_HWCACHE_ALIGN);
+       if (!slab_fences)
+               return -ENOMEM;
+
+       ret = subtests(tests, NULL);
+
+       kmem_cache_destroy(slab_fences);
+       return ret;
+}
index fb92be7e8aa71b2d4f5bfa7f70fa9058a6f45240..c4fd57d8b717cefe95042561a1f9c3ac24775d82 100644 (file)
@@ -310,8 +310,6 @@ source "drivers/gpu/drm/ast/Kconfig"
 
 source "drivers/gpu/drm/mgag200/Kconfig"
 
-source "drivers/gpu/drm/cirrus/Kconfig"
-
 source "drivers/gpu/drm/armada/Kconfig"
 
 source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
index 7f72ef5e781113d6fed2438ab4d9f126bed1facf..2c0e5a7e595362a5176a148d6d8d2770fd891804 100644 (file)
@@ -17,7 +17,8 @@ drm-y       :=        drm_auth.o drm_cache.o \
                drm_plane.o drm_color_mgmt.o drm_print.o \
                drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
                drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
-               drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
+               drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+               drm_managed.o
 
 drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
-drm_vram_helper-y := drm_gem_vram_helper.o \
-                    drm_vram_helper_common.o
+drm_vram_helper-y := drm_gem_vram_helper.o
 obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
 
 drm_ttm_helper-y := drm_gem_ttm_helper.o
@@ -74,7 +74,6 @@ obj-$(CONFIG_DRM_I915)        += i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_V3D)  += v3d/
 obj-$(CONFIG_DRM_VC4)  += vc4/
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
index c2bbcdd9c875c25efac9bdc7cf63fd249d306c0e..210d57a4afc812b0c0b6d4b3d4a8d38f89afc159 100644 (file)
@@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
        amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
        amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
-       amdgpu_umc.o smu_v11_0_i2c.o
+       amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
index 8ac1581a6b53b21c0c9641455f1f4356915d6d23..cd913986863edc4ddeee45f1aa7f8b7dd7c7913d 100644 (file)
 #ifndef __AMDGPU_H__
 #define __AMDGPU_H__
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
 #include "amdgpu_ctx.h"
 
 #include <linux/atomic.h>
@@ -161,6 +173,7 @@ extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
@@ -177,6 +190,8 @@ extern int sched_policy;
 static const int sched_policy = KFD_SCHED_POLICY_HWS;
 #endif
 
+extern int amdgpu_tmz;
+
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
 #endif
@@ -190,8 +205,6 @@ extern int amdgpu_cik_support;
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT           (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE                    16
 #define AMDGPU_DEBUGFS_MAX_COMPONENTS          32
 #define AMDGPUFB_CONN_LIMIT                    4
 #define AMDGPU_BIOS_NUM_SCRATCH                        16
@@ -439,7 +452,9 @@ struct amdgpu_fpriv {
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib);
+                 unsigned size,
+                 enum amdgpu_ib_pool_type pool,
+                 struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -512,7 +527,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 128      /* Reserve at most 128 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 256      /* Reserve at most 256 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
@@ -724,6 +739,7 @@ struct amdgpu_device {
        uint32_t                        rev_id;
        uint32_t                        external_rev_id;
        unsigned long                   flags;
+       unsigned long                   apu_flags;
        int                             usec_timeout;
        const struct amdgpu_asic_funcs  *asic_funcs;
        bool                            shutdown;
@@ -751,7 +767,6 @@ struct amdgpu_device {
        uint8_t                         *bios;
        uint32_t                        bios_size;
        struct amdgpu_bo                *stolen_vga_memory;
-       struct amdgpu_bo                *discovery_memory;
        uint32_t                        bios_scratch_reg_offset;
        uint32_t                        bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -843,7 +858,8 @@ struct amdgpu_device {
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
-       struct amdgpu_sa_manager        ring_tmp_bo;
+       struct amdgpu_sa_manager        ib_pools[AMDGPU_IB_POOL_MAX];
+       struct amdgpu_sched             gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
 
        /* interrupts */
        struct amdgpu_irq               irq;
@@ -903,7 +919,9 @@ struct amdgpu_device {
        struct amdgpu_display_manager dm;
 
        /* discovery */
-       uint8_t                         *discovery;
+       uint8_t                         *discovery_bin;
+       uint32_t                        discovery_tmr_size;
+       struct amdgpu_bo                *discovery_memory;
 
        /* mes */
        bool                            enable_mes;
@@ -923,7 +941,7 @@ struct amdgpu_device {
        atomic64_t gart_pin_size;
 
        /* soc15 register offset based on ip, instance and  segment */
-       uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+       uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
        /* delayed work_func for deferring clockgating during resume */
        struct delayed_work     delayed_init_work;
@@ -935,9 +953,6 @@ struct amdgpu_device {
        /* link all shadow bo */
        struct list_head                shadow_list;
        struct mutex                    shadow_list_lock;
-       /* keep an lru list of rings by HW IP */
-       struct list_head                ring_lru_list;
-       spinlock_t                      ring_lru_list_lock;
 
        /* record hw reset is performed */
        bool has_hw_reset;
@@ -947,8 +962,6 @@ struct amdgpu_device {
        bool                            in_suspend;
        bool                            in_hibernate;
 
-       /* record last mm index being written through WREG32*/
-       unsigned long last_mm_index;
        bool                            in_gpu_reset;
        enum pp_mp1_state               mp1_state;
        struct mutex  lock_reset;
@@ -967,14 +980,19 @@ struct amdgpu_device {
        uint64_t                        unique_id;
        uint64_t        df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
 
-       /* device pstate */
-       int                             pstate;
        /* enable runtime pm on the device */
        bool                            runpm;
        bool                            in_runpm;
 
        bool                            pm_sysfs_en;
        bool                            ucode_sysfs_en;
+
+       /* Chip product information */
+       char                            product_number[16];
+       char                            product_name[32];
+       char                            serial[16];
+
+       struct amdgpu_autodump          autodump;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -991,10 +1009,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+                           uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                        uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1011,25 +1029,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 /*
  * Registers read & write functions.
  */
-
-#define AMDGPU_REGS_IDX       (1<<0)
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
-#define AMDGPU_REGS_KIQ       (1<<2)
 
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
-#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
-#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
 
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1066,7 +1079,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
                tmp_ |= ((val) & ~(mask));                      \
                WREG32_PLL(reg, tmp_);                          \
        } while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
 
@@ -1249,5 +1262,9 @@ _name##_show(struct device *dev,                                  \
                                                                        \
 static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
 
-#endif
+/* Report whether TMZ (trusted/encrypted GPU memory) is enabled on @adev. */
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+       return adev->gmc.tmz_enabled;
+}
 
+#endif
index 1e41367ef74ee12c4181cc120e94cd1f6f3c5706..956cbbda479356fb00f263e6d45d044b1de96761 100644 (file)
@@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 
                DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-               /* todo: add DC handling */
                if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
                    !amdgpu_device_has_dc_support(adev)) {
                        struct amdgpu_encoder *enc = atif->encoder_for_bl;
@@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 #endif
                        }
                }
+#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+               if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+                   amdgpu_device_has_dc_support(adev)) {
+                       struct amdgpu_display_manager *dm = &adev->dm;
+                       struct backlight_device *bd = dm->backlight_dev;
+
+                       if (bd) {
+                               DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+                                                req.backlight_level);
+
+                               /*
+                                * XXX backlight_device_set_brightness() is
+                                * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+                                * It probably should accept 'reason' parameter.
+                                */
+                               backlight_device_set_brightness(bd, req.backlight_level);
+                       }
+               }
+#endif
+#endif
                if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
                        if (adev->flags & AMD_IS_PX) {
                                pm_runtime_get_sync(adev->ddev->dev);
index abfbe89e805ef2f4ef89585b12f41a9066273fd3..ad59ac4423b8bddb829d8a62d6101d85bdb0fe79 100644 (file)
@@ -564,6 +564,13 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
        return adev->gds.gws_size;
 }
 
+/* Return the ASIC revision id of the amdgpu device backing @kgd. */
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       return adev->rev_id;
+}
+
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
index 13feb313e9b3922e53f764d02a1935aba7208c00..3f2b695cf19e2a2b8e55b29205e666f79394854b 100644 (file)
@@ -65,6 +65,7 @@ struct kgd_mem {
        struct amdgpu_sync sync;
 
        bool aql_queue;
+       bool is_imported;
 };
 
 /* KFD Memory Eviction */
@@ -148,6 +149,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+                                       int queue_bit);
+
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                                void **mem_obj, uint64_t *gpu_addr,
@@ -175,6 +179,7 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
 
 /* Read user wptr from a specified user address space with page fault
@@ -218,7 +223,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags);
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem);
+               struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
index 4ec6d0c0320107489a4b7072d68f056b48da915a..691c89705bcdc0aa1ab2fecbb0c8fad6acc28191 100644 (file)
@@ -543,6 +543,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        uint32_t temp;
        struct v10_compute_mqd *m = get_mqd(mqd);
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
 #if 0
        unsigned long flags;
        int retry;
index 6a5b91d23fd9b8492c02996a5af0c60ee7d717bd..68e6e1bc8f3a524cd93b76e12a366f5202d4c3c4 100644 (file)
@@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
        if (ret) {
-               pr_err("amdgpu: failed to validate PT BOs\n");
+               pr_err("failed to validate PT BOs\n");
                return ret;
        }
 
        ret = amdgpu_amdkfd_validate(&param, pd);
        if (ret) {
-               pr_err("amdgpu: failed to validate PD\n");
+               pr_err("failed to validate PD\n");
                return ret;
        }
 
@@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
                if (ret) {
-                       pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+                       pr_err("failed to kmap PD, ret=%d\n", ret);
                        return ret;
                }
        }
@@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
-       if (!ret)
-               ctx->reserved = true;
-       else {
-               pr_err("Failed to reserve buffers in ttm\n");
+       if (ret) {
+               pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
+               return ret;
        }
 
-       return ret;
+       ctx->reserved = true;
+       return 0;
 }
 
 /**
@@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
-       if (!ret)
-               ctx->reserved = true;
-       else
-               pr_err("Failed to reserve buffers in ttm.\n");
-
        if (ret) {
+               pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
+               return ret;
        }
 
-       return ret;
+       ctx->reserved = true;
+       return 0;
 }
 
 /**
@@ -1279,31 +1277,30 @@ err:
 }
 
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem)
+               struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
+       unsigned int mapped_to_gpu_memory;
        int ret;
+       bool is_imported = 0;
 
        mutex_lock(&mem->lock);
-
-       if (mem->mapped_to_gpu_memory > 0) {
-               pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
-                               mem->va, bo_size);
-               mutex_unlock(&mem->lock);
-               return -EBUSY;
-       }
-
+       mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+       is_imported = mem->is_imported;
        mutex_unlock(&mem->lock);
        /* lock is not needed after this, since mem is unused and will
         * be freed anyway
         */
 
-       /* No more MMU notifiers */
-       amdgpu_mn_unregister(mem->bo);
+       if (mapped_to_gpu_memory > 0) {
+               pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+                               mem->va, bo_size);
+               return -EBUSY;
+       }
 
        /* Make sure restore workers don't access the BO any more */
        bo_list_entry = &mem->validate_list;
@@ -1311,6 +1308,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
 
+       /* No more MMU notifiers */
+       amdgpu_mn_unregister(mem->bo);
+
        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
                return ret;
@@ -1342,6 +1342,17 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                kfree(mem->bo->tbo.sg);
        }
 
+       /* Update the size of the BO being freed if it was allocated from
+        * VRAM and is not imported.
+        */
+       if (size) {
+               if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
+                   (!is_imported))
+                       *size = bo_size;
+               else
+                       *size = 0;
+       }
+
        /* Free the BO*/
        drm_gem_object_put_unlocked(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
@@ -1697,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
        amdgpu_sync_create(&(*mem)->sync);
+       (*mem)->is_imported = true;
 
        return 0;
 }
index d1495e1c92894168bc84e3f89decd27f31516cf2..d9b35df33806d178afbea6d44954396121bee021 100644 (file)
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
        for (i = 0; i < n; i++) {
                struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
                r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
-                                      false, false);
+                                      false, false, false);
                if (r)
                        goto exit_do_move;
                r = dma_fence_wait(fence, false);
index 031b094607bdd5bb4682fa04ad477c56ad238ac0..78ac6dbe70d84e22c9e2497354df636416931752 100644 (file)
@@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 {
        CGS_FUNC_ADEV;
        switch (space) {
-       case CGS_IND_REG__MMIO:
-               return RREG32_IDX(index);
        case CGS_IND_REG__PCIE:
                return RREG32_PCIE(index);
        case CGS_IND_REG__SMC:
@@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
+       default:
+               BUG();
        }
        WARN(1, "Invalid indirect register space");
        return 0;
@@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 {
        CGS_FUNC_ADEV;
        switch (space) {
-       case CGS_IND_REG__MMIO:
-               return WREG32_IDX(index, value);
        case CGS_IND_REG__PCIE:
                return WREG32_PCIE(index, value);
        case CGS_IND_REG__SMC:
@@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
+       default:
+               BUG();
        }
        WARN(1, "Invalid indirect register space");
 }
index af91627b19b0c5dd42a924fc2169136fd5165934..19070226a94521d112df8798bd5b19218d41364d 100644 (file)
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                ring = to_amdgpu_ring(entity->rq->sched);
                r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-                                  chunk_ib->ib_bytes : 0, ib);
+                                  chunk_ib->ib_bytes : 0,
+                                  AMDGPU_IB_POOL_DELAYED, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
@@ -1207,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_sched_entity *entity = p->entity;
-       enum drm_sched_priority priority;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
@@ -1257,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
-       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
index 6ed36a2c5f73f2e89654d34b3cdbd9444c0c0270..8842c55d4490b3fdb57ffda94a7cfee219bfc14d 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 #include "amdgpu_ras.h"
+#include <linux/nospec.h>
 
 #define to_amdgpu_ctx_entity(e)        \
        container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
        }
 }
 
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+                                                enum drm_sched_priority prio,
+                                                u32 hw_ip)
+{
+       unsigned int hw_prio;
+
+       hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+                       amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+                       AMDGPU_RING_PRIO_DEFAULT;
+       hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+       if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+               hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+       return hw_prio;
+}
+
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+                                  const u32 ring)
 {
        struct amdgpu_device *adev = ctx->adev;
        struct amdgpu_ctx_entity *entity;
        struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
        unsigned num_scheds = 0;
-       enum gfx_pipe_priority hw_prio;
+       unsigned int hw_prio;
        enum drm_sched_priority priority;
        int r;
 
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
        entity->sequence = 1;
        priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                                ctx->init_priority : ctx->override_priority;
-       switch (hw_ip) {
-       case AMDGPU_HW_IP_GFX:
-               sched = &adev->gfx.gfx_ring[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_COMPUTE:
-               hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-               scheds = adev->gfx.compute_prio_sched[hw_prio];
-               num_scheds = adev->gfx.num_compute_sched[hw_prio];
-               break;
-       case AMDGPU_HW_IP_DMA:
-               scheds = adev->sdma.sdma_sched;
-               num_scheds = adev->sdma.num_sdma_sched;
-               break;
-       case AMDGPU_HW_IP_UVD:
-               sched = &adev->uvd.inst[0].ring.sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCE:
-               sched = &adev->vce.ring[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_UVD_ENC:
-               sched = &adev->uvd.inst[0].ring_enc[0].sched;
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_DEC:
-               sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
-                                           adev->vcn.num_vcn_dec_sched);
-               scheds = &sched;
-               num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_ENC:
-               sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
-                                           adev->vcn.num_vcn_enc_sched);
+       hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+       hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+       scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+       num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+       if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+               sched = drm_sched_pick_best(scheds, num_scheds);
                scheds = &sched;
                num_scheds = 1;
-               break;
-       case AMDGPU_HW_IP_VCN_JPEG:
-               scheds = adev->jpeg.jpeg_sched;
-               num_scheds =  adev->jpeg.num_jpeg_sched;
-               break;
        }
 
        r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
        return 0;
-
 }
 
 static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
                                            enum drm_sched_priority priority)
 {
        struct amdgpu_device *adev = ctx->adev;
-       enum gfx_pipe_priority hw_prio;
+       unsigned int hw_prio;
        struct drm_gpu_scheduler **scheds = NULL;
        unsigned num_scheds;
 
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 
        /* set hw priority */
        if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-               hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-               scheds = adev->gfx.compute_prio_sched[hw_prio];
-               num_scheds = adev->gfx.num_compute_sched[hw_prio];
+               hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+                                                     AMDGPU_HW_IP_COMPUTE);
+               hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+               scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+               num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
                drm_sched_entity_modify_sched(&aentity->entity, scheds,
                                              num_scheds);
        }
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
 }
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
-       int num_compute_sched_normal = 0;
-       int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
-       int i;
-
-       /* use one drm sched array, gfx.compute_sched to store both high and
-        * normal priority drm compute schedulers */
-       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               if (!adev->gfx.compute_ring[i].has_high_prio)
-                       adev->gfx.compute_sched[num_compute_sched_normal++] =
-                               &adev->gfx.compute_ring[i].sched;
-               else
-                       adev->gfx.compute_sched[num_compute_sched_high--] =
-                               &adev->gfx.compute_ring[i].sched;
-       }
-
-       /* compute ring only has two priority for now */
-       i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-       adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-       adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
-       i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-       if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
-               /* When compute has no high priority rings then use */
-               /* normal priority sched array */
-               adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-               adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-       } else {
-               adev->gfx.compute_prio_sched[i] =
-                       &adev->gfx.compute_sched[num_compute_sched_high - 1];
-               adev->gfx.num_compute_sched[i] =
-                       adev->gfx.num_compute_rings - num_compute_sched_normal;
-       }
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
-       int i, j;
-
-       amdgpu_ctx_init_compute_sched(adev);
-       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
-               adev->gfx.num_gfx_sched++;
-       }
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
-               adev->sdma.num_sdma_sched++;
-       }
-
-       for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-               if (adev->vcn.harvest_config & (1 << i))
-                       continue;
-               adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
-                       &adev->vcn.inst[i].ring_dec.sched;
-       }
-
-       for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-               if (adev->vcn.harvest_config & (1 << i))
-                       continue;
-               for (j = 0; j < adev->vcn.num_enc_rings; ++j)
-                       adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
-                               &adev->vcn.inst[i].ring_enc[j].sched;
-       }
-
-       for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-               if (adev->jpeg.harvest_config & (1 << i))
-                       continue;
-               adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
-                       &adev->jpeg.inst[i].ring_dec.sched;
-       }
-}
index de490f183af2bcf115182e25a24f8b9d8a34bf17..f54e1031466159cd1436836104e3dc4bdd026a07 100644 (file)
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
 #endif
index c0f9a651dc067bbf46ce689535934740ab7315ee..d33cb344be69f5266b4933137151ffa5feb5117a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
-
+#include <linux/poll.h>
 #include <drm/drm_debugfs.h>
 
 #include "amdgpu.h"
@@ -74,8 +74,82 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
        return 0;
 }
 
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned long timeout = 600 * HZ;
+       int ret;
+
+       wake_up_interruptible(&adev->autodump.gpu_hang);
+
+       ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
+       if (ret == 0) {
+               pr_err("autodump: timeout, move on to gpu recovery\n");
+               return -ETIMEDOUT;
+       }
+#endif
+       return 0;
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
+{
+       struct amdgpu_device *adev = inode->i_private;
+       int ret;
+
+       file->private_data = adev;
+
+       mutex_lock(&adev->lock_reset);
+       if (adev->autodump.dumping.done) {
+               reinit_completion(&adev->autodump.dumping);
+               ret = 0;
+       } else {
+               ret = -EBUSY;
+       }
+       mutex_unlock(&adev->lock_reset);
+
+       return ret;
+}
+
+static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
+{
+       struct amdgpu_device *adev = file->private_data;
+
+       complete_all(&adev->autodump.dumping);
+       return 0;
+}
+
+static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+       struct amdgpu_device *adev = file->private_data;
+
+       poll_wait(file, &adev->autodump.gpu_hang, poll_table);
+
+       if (adev->in_gpu_reset)
+               return POLLIN | POLLRDNORM | POLLWRNORM;
+
+       return 0;
+}
+
+static const struct file_operations autodump_debug_fops = {
+       .owner = THIS_MODULE,
+       .open = amdgpu_debugfs_autodump_open,
+       .poll = amdgpu_debugfs_autodump_poll,
+       .release = amdgpu_debugfs_autodump_release,
+};
+
+static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
+{
+       init_completion(&adev->autodump.dumping);
+       complete_all(&adev->autodump.dumping);
+       init_waitqueue_head(&adev->autodump.gpu_hang);
+
+       debugfs_create_file("amdgpu_autodump", 0600,
+               adev->ddev->primary->debugfs_root,
+               adev, &autodump_debug_fops);
+}
+
 /**
  * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
  *
@@ -152,11 +226,16 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
                    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return -EINVAL;
                }
                mutex_lock(&adev->grbm_idx_mutex);
@@ -207,6 +286,7 @@ end:
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -255,6 +335,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -263,6 +347,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -275,6 +360,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -304,6 +390,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -311,6 +401,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -325,6 +416,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -354,6 +446,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -362,6 +458,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -374,6 +471,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -403,6 +501,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -410,6 +512,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -424,6 +527,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -453,6 +557,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -461,6 +569,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -473,6 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -502,6 +612,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        while (size) {
                uint32_t value;
 
@@ -509,6 +623,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
                if (r) {
                        pm_runtime_mark_last_busy(adev->ddev->dev);
                        pm_runtime_put_autosuspend(adev->ddev->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
 
@@ -523,6 +638,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -651,16 +767,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
 
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
-       if (r)
+       if (r) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return r;
+       }
 
-       if (size > valuesize)
+       if (size > valuesize) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return -EINVAL;
+       }
 
        outsize = 0;
        x = 0;
@@ -673,6 +797,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
                }
        }
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return !r ? outsize : r;
 }
 
@@ -720,6 +845,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        /* switch to the specific se/sh/cu */
        mutex_lock(&adev->grbm_idx_mutex);
        amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -734,16 +863,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
 
-       if (!x)
+       if (!x) {
+               amdgpu_virt_disable_access_debugfs(adev);
                return -EINVAL;
+       }
 
        while (size && (offset < x * 4)) {
                uint32_t value;
 
                value = data[offset >> 2];
                r = put_user(value, (uint32_t *)buf);
-               if (r)
+               if (r) {
+                       amdgpu_virt_disable_access_debugfs(adev);
                        return r;
+               }
 
                result += 4;
                buf += 4;
@@ -751,6 +884,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
                size -= 4;
        }
 
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -805,6 +939,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        if (r < 0)
                return r;
 
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0)
+               return r;
+
        /* switch to the specific se/sh/cu */
        mutex_lock(&adev->grbm_idx_mutex);
        amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -840,6 +978,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 
 err:
        kfree(data);
+       amdgpu_virt_disable_access_debugfs(adev);
        return result;
 }
 
@@ -1369,6 +1508,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 
        amdgpu_ras_debugfs_create_all(adev);
 
+       amdgpu_debugfs_autodump_init(adev);
+
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
                                        ARRAY_SIZE(amdgpu_debugfs_list));
 }
index de12d11015260eb3c44a9ef1cb95a434a26b5fda..2803884d338d53c1218b969fdb61c13658885660 100644 (file)
@@ -31,6 +31,11 @@ struct amdgpu_debugfs {
        unsigned                num_files;
 };
 
+struct amdgpu_autodump {
+       struct completion               dumping;
+       struct wait_queue_head          gpu_hang;
+};
+
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_fini(struct amdgpu_device *adev);
@@ -40,3 +45,4 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
index affde2de2a0dbd3f68f934c693ff7a906f9223a7..a027a8f7b28193c443788a1fec0f99acae702b69 100644 (file)
 #include "amdgpu_xgmi.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -137,6 +139,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
+/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_name(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, S_IRUGO,
+               amdgpu_device_get_product_name, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_number(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, S_IRUGO,
+               amdgpu_device_get_product_number, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_serial_number(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+               amdgpu_device_get_serial_number, NULL);
+
 /**
  * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
  *
@@ -231,10 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * MMIO register access helper functions.
+ * device register access helper functions.
  */
 /**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -242,25 +310,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
-                       uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+                           uint32_t acc_flags)
 {
        uint32_t ret;
 
-       if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_rreg(adev, reg);
 
-       if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+       if ((reg * 4) < adev->rmmio_size)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       else
+               ret = adev->pcie_rreg(adev, (reg * 4));
+       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
        return ret;
 }
 
@@ -306,28 +368,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
                BUG();
 }
 
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
+                                            uint32_t v, uint32_t acc_flags)
 {
-       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 
-       if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+       if ((reg * 4) < adev->rmmio_size)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-               udelay(500);
-       }
+       else
+               adev->pcie_wreg(adev, (reg * 4), v);
 }
 
 /**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -336,17 +389,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+                       uint32_t acc_flags)
 {
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-               adev->last_mm_index = v;
-       }
-
-       if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
 }
 
 /*
@@ -365,7 +414,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
 }
 
 /**
@@ -397,20 +446,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-               adev->last_mm_index = v;
-       }
-
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }
-
-       if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-               udelay(500);
-       }
 }
 
 /**
@@ -1126,6 +1167,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
+       amdgpu_gmc_tmz_set(adev);
+
        return 0;
 }
 
@@ -1147,7 +1190,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                return;
 
        if (state == VGA_SWITCHEROO_ON) {
-               pr_info("amdgpu: switched on\n");
+               pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
@@ -1161,7 +1204,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
-               pr_info("amdgpu: switched off\n");
+               pr_info("switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
@@ -1524,9 +1567,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
@@ -1574,8 +1617,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-               if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+               if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
+                       amdgpu_discovery_get_gfx_info(adev);
                        goto parse_soc_bounding_box;
+               }
 
                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1721,19 +1766,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       r = amdgpu_device_parse_gpu_info_fw(adev);
-       if (r)
-               return r;
-
-       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
-               amdgpu_discovery_get_gfx_info(adev);
-
        amdgpu_amdkfd_device_probe(adev);
 
        if (amdgpu_sriov_vf(adev)) {
+               /* handle vbios stuff prior to full access mode for the new handshake */
+               if (adev->virt.req_init_data_ver == 1) {
+                       if (!amdgpu_get_bios(adev)) {
+                               DRM_ERROR("failed to get vbios\n");
+                               return -EINVAL;
+                       }
+
+                       r = amdgpu_atombios_init(adev);
+                       if (r) {
+                               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+                               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+                               return r;
+                       }
+               }
+       }
+
+       /* we need to send REQ_GPU here for the legacy handshake, otherwise the
+        * vbios will not be prepared by the host for this VF */
+       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
-                       return -EAGAIN;
+                       return r;
        }
 
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1763,6 +1820,14 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                }
                /* get the vbios after the asic_funcs are set up */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+                       r = amdgpu_device_parse_gpu_info_fw(adev);
+                       if (r)
+                               return r;
+
+                       /* skip vbios handling for new handshake */
+                       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
+                               continue;
+
                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;
@@ -1889,6 +1954,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                return r;
 
+       if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
+               r = amdgpu_virt_request_full_gpu(adev, true);
+               if (r)
+                       return -EAGAIN;
+       }
+
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -1975,6 +2046,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                amdgpu_xgmi_add_device(adev);
        amdgpu_amdkfd_device_init(adev);
 
+       amdgpu_fru_get_product_info(adev);
+
 init_failed:
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_release_full_gpu(adev, true);
@@ -2171,6 +2244,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.late_initialized = true;
        }
 
+       amdgpu_ras_set_error_query_ready(adev, true);
+
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 
@@ -2203,7 +2278,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
                                if (gpu_instance->adev->flags & AMD_IS_APU)
                                        continue;
 
-                               r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+                               r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+                                               AMDGPU_XGMI_PSTATE_MIN);
                                if (r) {
                                        DRM_ERROR("pstate setting failed (%d).\n", r);
                                        break;
@@ -2785,12 +2861,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
         * By default timeout for non compute jobs is 10000.
         * And there is no timeout enforced on compute jobs.
         * In SR-IOV or passthrough mode, timeout for compute
-        * jobs are 10000 by default.
+        * jobs is 60000 by default.
         */
        adev->gfx_timeout = msecs_to_jiffies(10000);
        adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
        if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
-               adev->compute_timeout = adev->gfx_timeout;
+               adev->compute_timeout =  msecs_to_jiffies(60000);
        else
                adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
 
@@ -2841,6 +2917,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
        return ret;
 }
 
+static const struct attribute *amdgpu_dev_attributes[] = {
+       &dev_attr_product_name.attr,
+       &dev_attr_product_number.attr,
+       &dev_attr_serial_number.attr,
+       &dev_attr_pcie_replay_count.attr,
+       NULL
+};
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2942,9 +3026,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
-       INIT_LIST_HEAD(&adev->ring_lru_list);
-       spin_lock_init(&adev->ring_lru_list_lock);
-
        INIT_DELAYED_WORK(&adev->delayed_init_work,
                          amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -2953,7 +3034,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
 
        adev->gfx.gfx_off_req_count = 1;
-       adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0;
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
@@ -3002,18 +3083,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
                adev->enable_mes = true;
 
-       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
-               r = amdgpu_discovery_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "amdgpu_discovery_init failed\n");
-                       return r;
-               }
-       }
-
-       /* early init functions */
-       r = amdgpu_device_ip_early_init(adev);
-       if (r)
-               return r;
+       /* detect hw virtualization here */
+       amdgpu_detect_virtualization(adev);
 
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
@@ -3021,6 +3092,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                return r;
        }
 
+       /* early init functions */
+       r = amdgpu_device_ip_early_init(adev);
+       if (r)
+               return r;
+
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
 
@@ -3127,14 +3203,13 @@ fence_driver_init:
                goto failed;
        }
 
-       DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+       dev_info(adev->dev,
+               "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
                        adev->gfx.config.max_shader_engines,
                        adev->gfx.config.max_sh_per_se,
                        adev->gfx.config.max_cu_per_sh,
                        adev->gfx.cu_info.number);
 
-       amdgpu_ctx_init_sched(adev);
-
        adev->accel_working = true;
 
        amdgpu_vm_check_compute_bug(adev);
@@ -3199,9 +3274,9 @@ fence_driver_init:
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-       r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+       r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (r) {
-               dev_err(adev->dev, "Could not create pcie_replay_count");
+               dev_err(adev->dev, "Could not create amdgpu device attr\n");
                return r;
        }
 
@@ -3284,9 +3359,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        adev->rmmio = NULL;
        amdgpu_device_doorbell_fini(adev);
 
-       device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
        if (adev->ucode_sysfs_en)
                amdgpu_ucode_sysfs_fini(adev);
+
+       sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
        if (IS_ENABLED(CONFIG_PERF_EVENTS))
                amdgpu_pmu_fini(adev);
        if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@@ -3754,6 +3830,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       amdgpu_amdkfd_pre_reset(adev);
+
        /* Resume IP prior to SMC */
        r = amdgpu_device_ip_reinit_early_sriov(adev);
        if (r)
@@ -3848,6 +3926,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        int i, r = 0;
        bool need_full_reset  = *need_full_reset_arg;
 
+       amdgpu_debugfs_wait_dump(adev);
+
        /* block all schedulers and reset given job's ring */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
@@ -4052,6 +4132,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
        mutex_unlock(&adev->lock_reset);
 }
 
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (p) {
+               pm_runtime_enable(&(p->dev));
+               pm_runtime_resume(&(p->dev));
+       }
+}
+
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+       enum amd_reset_method reset_method;
+       struct pci_dev *p = NULL;
+       u64 expires;
+
+       /*
+        * For now, only BACO and mode1 reset are confirmed
+        * to suffer the audio issue when not properly suspended.
+        */
+       reset_method = amdgpu_asic_reset_method(adev);
+       if ((reset_method != AMD_RESET_METHOD_BACO) &&
+            (reset_method != AMD_RESET_METHOD_MODE1))
+               return -EINVAL;
+
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return -ENODEV;
+
+       expires = pm_runtime_autosuspend_expiration(&(p->dev));
+       if (!expires)
+               /*
+                * If we cannot get the audio device autosuspend delay,
+                * a fixed 4S interval will be used. Since 3S is the
+                * audio controller's default autosuspend delay setting,
+                * the 4S used here is guaranteed to cover that.
+                */
+               expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+       while (!pm_runtime_status_suspended(&(p->dev))) {
+               if (!pm_runtime_suspend(&(p->dev)))
+                       break;
+
+               if (expires < ktime_get_mono_fast_ns()) {
+                       dev_warn(adev->dev, "failed to suspend display audio\n");
+                       /* TODO: abort the succeeding gpu reset? */
+                       return -ETIMEDOUT;
+               }
+       }
+
+       pm_runtime_disable(&(p->dev));
+
+       return 0;
+}
+
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -4067,7 +4205,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
-       bool need_full_reset, job_signaled;
+       bool need_full_reset = false;
+       bool job_signaled = false;
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
@@ -4075,6 +4214,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        bool use_baco =
                (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
                true : false;
+       bool audio_suspended = false;
 
        /*
         * Flush RAM to disk so that after reboot
@@ -4088,16 +4228,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                emergency_restart();
        }
 
-       need_full_reset = job_signaled = false;
-       INIT_LIST_HEAD(&device_list);
-
        dev_info(adev->dev, "GPU %s begin!\n",
                (in_ras_intr && !use_baco) ? "jobs stop":"reset");
 
-       cancel_delayed_work_sync(&adev->delayed_init_work);
-
-       hive = amdgpu_get_xgmi_hive(adev, false);
-
        /*
         * Here we trylock to avoid chain of resets executing from
         * either trigger by jobs on different adevs in XGMI hive or jobs on
@@ -4105,39 +4238,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         * We always reset all schedulers for device and all devices for XGMI
         * hive so that should take care of them too.
         */
-
+       hive = amdgpu_get_xgmi_hive(adev, true);
        if (hive && !mutex_trylock(&hive->reset_lock)) {
                DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
                          job ? job->base.id : -1, hive->hive_id);
+               mutex_unlock(&hive->hive_lock);
                return 0;
        }
 
-       /* Start with adev pre asic reset first for soft reset check.*/
-       if (!amdgpu_device_lock_adev(adev, !hive)) {
-               DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
-                         job ? job->base.id : -1);
-               return 0;
-       }
-
-       /* Block kfd: SRIOV would do it separately */
-       if (!amdgpu_sriov_vf(adev))
-                amdgpu_amdkfd_pre_reset(adev);
-
-       /* Build list of devices to reset */
-       if  (adev->gmc.xgmi.num_physical_nodes > 1) {
-               if (!hive) {
-                       /*unlock kfd: SRIOV would do it separately */
-                       if (!amdgpu_sriov_vf(adev))
-                               amdgpu_amdkfd_post_reset(adev);
-                       amdgpu_device_unlock_adev(adev);
+       /*
+        * Build list of devices to reset.
+        * In case we are in XGMI hive mode, resort the device list
+        * to put adev in the 1st position.
+        */
+       INIT_LIST_HEAD(&device_list);
+       if (adev->gmc.xgmi.num_physical_nodes > 1) {
+               if (!hive)
                        return -ENODEV;
-               }
-
-               /*
-                * In case we are in XGMI hive mode device reset is done for all the
-                * nodes in the hive to retrain all XGMI links and hence the reset
-                * sequence is executed in loop on all nodes.
-                */
+               if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
+                       list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
                device_list_handle = &hive->device_list;
        } else {
                list_add_tail(&adev->gmc.xgmi.head, &device_list);
@@ -4146,19 +4265,40 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        /* block all schedulers and reset given job's ring */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-               if (tmp_adev != adev) {
-                       amdgpu_device_lock_adev(tmp_adev, false);
-                       if (!amdgpu_sriov_vf(tmp_adev))
-                                       amdgpu_amdkfd_pre_reset(tmp_adev);
+               if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
+                       DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+                                 job ? job->base.id : -1);
+                       mutex_unlock(&hive->hive_lock);
+                       return 0;
                }
 
+               /*
+                * Try to put the audio codec into suspend state
+                * before the gpu reset is started.
+                *
+                * Because the power domain of the graphics device
+                * is shared with the AZ power domain, without this
+                * we may change the audio hardware from behind
+                * the audio driver's back. That would trigger
+                * some audio codec errors.
+                */
+               if (!amdgpu_device_suspend_display_audio(tmp_adev))
+                       audio_suspended = true;
+
+               amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+               cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+               if (!amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_pre_reset(tmp_adev);
+
                /*
                 * Mark these ASICs to be reset as untracked first,
                 * and add them back after the reset completes.
                 */
                amdgpu_unregister_gpu_instance(tmp_adev);
 
-               amdgpu_fbdev_set_suspend(adev, 1);
+               amdgpu_fbdev_set_suspend(tmp_adev, 1);
 
                /* disable ras on ALL IPs */
                if (!(in_ras_intr && !use_baco) &&
@@ -4178,7 +4318,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                }
        }
 
-
        if (in_ras_intr && !use_baco)
                goto skip_sched_resume;
 
@@ -4189,30 +4328,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         * job->base holds a reference to parent fence
         */
        if (job && job->base.s_fence->parent &&
-           dma_fence_is_signaled(job->base.s_fence->parent))
+           dma_fence_is_signaled(job->base.s_fence->parent)) {
                job_signaled = true;
-
-       if (job_signaled) {
                dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
                goto skip_hw_reset;
        }
 
-
-       /* Guilty job will be freed after this*/
-       r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
-       if (r) {
-               /*TODO Should we stop ?*/
-               DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
-                         r, adev->ddev->unique);
-               adev->asic_reset_res = r;
-       }
-
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
-               if (tmp_adev == adev)
-                       continue;
-
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
                                                 &need_full_reset);
@@ -4274,11 +4397,15 @@ skip_sched_resume:
                /*unlock kfd: SRIOV would do it separately */
                if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
                        amdgpu_amdkfd_post_reset(tmp_adev);
+               if (audio_suspended)
+                       amdgpu_device_resume_display_audio(tmp_adev);
                amdgpu_device_unlock_adev(tmp_adev);
        }
 
-       if (hive)
+       if (hive) {
                mutex_unlock(&hive->reset_lock);
+               mutex_unlock(&hive->hive_lock);
+       }
 
        if (r)
                dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
index 057f6ea645d7eb12384007a96365002ef5e0aa17..61a26c15c8dd50398b4dc0909372863b15fc1782 100644 (file)
@@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
        uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
        void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
                         uint32_t ficadl_val, uint32_t ficadh_val);
-       uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
-                                      uint32_t df_inst);
-       uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_df {
index 27d8ae19a7a40ebd27489ab08546eac76133fe87..b5d6274952a5e664543ca363e2133789ea0fa4cf 100644 (file)
@@ -23,9 +23,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_discovery.h"
-#include "soc15_common.h"
 #include "soc15_hw_ip.h"
-#include "nbio/nbio_2_3_offset.h"
 #include "discovery.h"
 
 #define mmRCC_CONFIG_MEMSIZE   0xde3
@@ -135,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
 static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
 {
        uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-       uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+       uint64_t pos = vram_size - adev->discovery_tmr_size;
 
-       amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+       amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+                                 adev->discovery_tmr_size, false);
        return 0;
 }
 
@@ -158,7 +157,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
        return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
 }
 
-int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
 {
        struct table_info *info;
        struct binary_header *bhdr;
@@ -169,17 +168,18 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        uint16_t checksum;
        int r;
 
-       adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
-       if (!adev->discovery)
+       adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
+       adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
+       if (!adev->discovery_bin)
                return -ENOMEM;
 
-       r = amdgpu_discovery_read_binary(adev, adev->discovery);
+       r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
        if (r) {
                DRM_ERROR("failed to read ip discovery binary\n");
                goto out;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
+       bhdr = (struct binary_header *)adev->discovery_bin;
 
        if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
                DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        size = bhdr->binary_size - offset;
        checksum = bhdr->binary_checksum;
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              size, checksum)) {
                DRM_ERROR("invalid ip discovery binary checksum\n");
                r = -EINVAL;
@@ -202,7 +202,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        info = &bhdr->table_list[IP_DISCOVERY];
        offset = le16_to_cpu(info->offset);
        checksum = le16_to_cpu(info->checksum);
-       ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
 
        if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
                DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
                goto out;
        }
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              ihdr->size, checksum)) {
                DRM_ERROR("invalid ip discovery data table checksum\n");
                r = -EINVAL;
@@ -220,9 +220,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        info = &bhdr->table_list[GC];
        offset = le16_to_cpu(info->offset);
        checksum = le16_to_cpu(info->checksum);
-       ghdr = (struct gpu_info_header *)(adev->discovery + offset);
+       ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
 
-       if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+       if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
                                              ghdr->size, checksum)) {
                DRM_ERROR("invalid gc data table checksum\n");
                r = -EINVAL;
@@ -232,16 +232,16 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
        return 0;
 
 out:
-       kfree(adev->discovery);
-       adev->discovery = NULL;
+       kfree(adev->discovery_bin);
+       adev->discovery_bin = NULL;
 
        return r;
 }
 
 void amdgpu_discovery_fini(struct amdgpu_device *adev)
 {
-       kfree(adev->discovery);
-       adev->discovery = NULL;
+       kfree(adev->discovery_bin);
+       adev->discovery_bin = NULL;
 }
 
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -257,14 +257,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
        uint8_t num_base_address;
        int hw_ip;
        int i, j, k;
+       int r;
 
-       if (!adev->discovery) {
-               DRM_ERROR("ip discovery uninitialized\n");
-               return -EINVAL;
+       r = amdgpu_discovery_init(adev);
+       if (r) {
+               DRM_ERROR("amdgpu_discovery_init failed\n");
+               return r;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       ihdr = (struct ip_discovery_header *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
        num_dies = le16_to_cpu(ihdr->num_dies);
 
@@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 
        for (i = 0; i < num_dies; i++) {
                die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-               dhdr = (struct die_header *)(adev->discovery + die_offset);
+               dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
                num_ips = le16_to_cpu(dhdr->num_ips);
                ip_offset = die_offset + sizeof(*dhdr);
 
@@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                le16_to_cpu(dhdr->die_id), num_ips);
 
                for (j = 0; j < num_ips; j++) {
-                       ip = (struct ip *)(adev->discovery + ip_offset);
+                       ip = (struct ip *)(adev->discovery_bin + ip_offset);
                        num_base_address = ip->num_base_address;
 
                        DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
        uint16_t num_ips;
        int i, j;
 
-       if (!adev->discovery) {
+       if (!adev->discovery_bin) {
                DRM_ERROR("ip discovery uninitialized\n");
                return -EINVAL;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       ihdr = (struct ip_discovery_header *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
        num_dies = le16_to_cpu(ihdr->num_dies);
 
        for (i = 0; i < num_dies; i++) {
                die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-               dhdr = (struct die_header *)(adev->discovery + die_offset);
+               dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
                num_ips = le16_to_cpu(dhdr->num_ips);
                ip_offset = die_offset + sizeof(*dhdr);
 
                for (j = 0; j < num_ips; j++) {
-                       ip = (struct ip *)(adev->discovery + ip_offset);
+                       ip = (struct ip *)(adev->discovery_bin + ip_offset);
 
                        if (le16_to_cpu(ip->hw_id) == hw_id) {
                                if (major)
@@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
        struct binary_header *bhdr;
        struct gc_info_v1_0 *gc_info;
 
-       if (!adev->discovery) {
+       if (!adev->discovery_bin) {
                DRM_ERROR("ip discovery uninitialized\n");
                return -EINVAL;
        }
 
-       bhdr = (struct binary_header *)adev->discovery;
-       gc_info = (struct gc_info_v1_0 *)(adev->discovery +
+       bhdr = (struct binary_header *)adev->discovery_bin;
+       gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
                        le16_to_cpu(bhdr->table_list[GC].offset));
 
        adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
index ba78e15d9b055b3ecca1e9aba31d2fb268507c87..d50d597c45ed48315461680d6ba2832cb0822d28 100644 (file)
@@ -26,7 +26,6 @@
 
 #define DISCOVERY_TMR_SIZE  (64 << 10)
 
-int amdgpu_discovery_init(struct amdgpu_device *adev);
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
index 84cee27cd7efb4c4b05b40f099cc2ef117c29ec0..f7143d927b6d8126c7fc445e29eb57e2cfa53fb4 100644 (file)
@@ -523,7 +523,8 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                        break;
                case CHIP_RAVEN:
                        /* enable S/G on PCO and RV2 */
-                       if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+                       if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+                           (adev->apu_flags & AMD_APU_IS_PICASSO))
                                domain |= AMDGPU_GEM_DOMAIN_GTT;
                        break;
                default:
index ffeb20f11c07caf54eeb5632c6b85b229c221190..43d8ed7dbd0018d66192e43f487f5aadc814fc4c 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
 
 /**
  * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;
 
+       if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+               attach->peer2peer = false;
+
        if (attach->dev->driver == adev->dev->driver)
                return 0;
 
@@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
        struct dma_buf *dma_buf = attach->dmabuf;
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct sg_table *sgt;
        long r;
 
        if (!bo->pin_count) {
-               /* move buffer into GTT */
+               /* move buffer into GTT or VRAM */
                struct ttm_operation_ctx ctx = { false, false };
+               unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
 
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+                   attach->peer2peer) {
+                       bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       domains |= AMDGPU_GEM_DOMAIN_VRAM;
+               }
+               amdgpu_bo_placement_from_domain(bo, domains);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
                        return ERR_PTR(r);
@@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
                return ERR_PTR(-EBUSY);
        }
 
-       sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
-       if (IS_ERR(sgt))
-               return sgt;
-
-       if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC))
-               goto error_free;
+       switch (bo->tbo.mem.mem_type) {
+       case TTM_PL_TT:
+               sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+                                           bo->tbo.num_pages);
+               if (IS_ERR(sgt))
+                       return sgt;
+
+               if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+                                     DMA_ATTR_SKIP_CPU_SYNC))
+                       goto error_free;
+               break;
+
+       case TTM_PL_VRAM:
+               r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+                                             dir, &sgt);
+               if (r)
+                       return ERR_PTR(r);
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
 
        return sgt;
 
 error_free:
        sg_free_table(sgt);
        kfree(sgt);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-EBUSY);
 }
 
 /**
@@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)
 {
-       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-       sg_free_table(sgt);
-       kfree(sgt);
+       struct dma_buf *dma_buf = attach->dmabuf;
+       struct drm_gem_object *obj = dma_buf->priv;
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       if (sgt->sgl->page_link) {
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+               sg_free_table(sgt);
+               kfree(sgt);
+       } else {
+               amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+       }
 }
 
 /**
@@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 }
 
 static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+       .allow_peer2peer = true,
        .move_notify = amdgpu_dma_buf_move_notify
 };
 
index ba1bb95a3cf93c7864cb18a6f00b898999836980..d2a105e3bf7cccd98e70681b31b32f028d0e9367 100644 (file)
@@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
@@ -1188,3 +1188,13 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 
        return ret;
 }
+
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+{
+       struct smu_context *smu = &adev->smu;
+
+       if (is_support_sw_smu(adev))
+               return smu_allow_xgmi_power_down(smu, en);
+
+       return 0;
+}
\ No newline at end of file
index 936d85aa0fbc5fd4387793e6d851c9cddd45c028..6a8aae70a0e68d2571051f9ab859cf9961779b12 100644 (file)
@@ -450,6 +450,7 @@ struct amdgpu_pm {
 
        /* Used for I2C access to various EEPROMs on relevant ASICs */
        struct i2c_adapter smu_i2c;
+       struct list_head        pm_attr_list;
 };
 
 #define R600_SSTU_DFLT                               0
@@ -538,4 +539,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
                             uint32_t cstate);
 
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+
 #endif
index a735d79a717be8444a1491408d8e25f65402c168..126e74758a342c67bef0f9b43b1312674d12f536 100644 (file)
  * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  * - 3.36.0 - Allow reading more status registers on si/cik
  * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       37
+#define KMS_DRIVER_MINOR       38
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -139,12 +140,14 @@ int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
 /* FBC (bit 0) disabled by default*/
 uint amdgpu_dc_feature_mask = 0;
+uint amdgpu_dc_debug_mask = 0;
 int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
 int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = 0;
 
 struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -688,13 +691,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
 
 /**
  * DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what firmware version
+ * check says. Default value: false (rely on MEC2 firmware version check).
  */
 bool hws_gws_support;
 module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
 
 /**
   * DOC: queue_preemption_timeout_ms (int)
@@ -713,6 +715,13 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1
 MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
 module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
 
+/**
+ * DOC: dcdebugmask (uint)
+ * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
+
 /**
  * DOC: abmlevel (uint)
  * Override the default ABM (Adaptive Backlight Management) level used for DC
@@ -729,6 +738,16 @@ uint amdgpu_dm_abm_level = 0;
 MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
 module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
 
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value: 0 (off). TODO: change the default to auto once TMZ support is complete.
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1164,14 +1183,6 @@ static int amdgpu_pmops_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-       /* GPU comes up enabled by the bios on resume */
-       if (amdgpu_device_supports_boco(drm_dev) ||
-           amdgpu_device_supports_baco(drm_dev)) {
-               pm_runtime_disable(dev);
-               pm_runtime_set_active(dev);
-               pm_runtime_enable(dev);
-       }
-
        return amdgpu_device_resume(drm_dev, true);
 }
 
index 7531527067dfb164b71468379fc79647d6b552de..d878fe7fee51cc6c44b2475800da30b9c7e97619 100644 (file)
@@ -192,14 +192,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
  * Used For polling fence.
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+                             uint32_t timeout)
 {
        uint32_t seq;
+       signed long r;
 
        if (!s)
                return -EINVAL;
 
        seq = ++ring->fence_drv.sync_seq;
+       r = amdgpu_fence_wait_polling(ring,
+                                     seq - ring->fence_drv.num_fences_mask,
+                                     timeout);
+       if (r < 1)
+               return -ETIMEDOUT;
+
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644 (file)
index 0000000..815c072
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+
+#define I2C_PRODUCT_INFO_ADDR          0xAC
+#define I2C_PRODUCT_INFO_ADDR_SIZE     0x2
+#define I2C_PRODUCT_INFO_OFFSET                0xC0
+
+bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+{
+       /* TODO: Gaming SKUs don't have the FRU EEPROM.
+        * Use this hack to address hangs on modprobe on gaming SKUs
+        * until a proper solution can be implemented by only supporting
+        * the explicit chip IDs for VG20 Server cards
+        *
+        * TODO: Add list of supported Arcturus DIDs once confirmed
+        */
+       if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
+           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
+           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
+               return true;
+       return false;
+}
+
+int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+                          unsigned char *buff)
+{
+       int ret, size;
+       struct i2c_msg msg = {
+                       .addr   = I2C_PRODUCT_INFO_ADDR,
+                       .flags  = I2C_M_RD,
+                       .buf    = buff,
+       };
+       buff[0] = 0;
+       buff[1] = addrptr;
+       msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
+       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+       if (ret < 1) {
+               DRM_WARN("FRU: Failed to get size field");
+               return ret;
+       }
+
+       /* The size returned by the i2c requires subtraction of 0xC0 since the
+        * size apparently always reports as 0xC0+actual size.
+        */
+       size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
+       /* Add 1 since address field was 1 byte */
+       buff[1] = addrptr + 1;
+
+       msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
+       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+       if (ret < 1) {
+               DRM_WARN("FRU: Failed to get data field");
+               return ret;
+       }
+
+       return size;
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+       unsigned char buff[34];
+       int addrptr = 0, size = 0;
+
+       if (!is_fru_eeprom_supported(adev))
+               return 0;
+
+       /* If algo exists, it means that the i2c_adapter's initialized */
+       if (!adev->pm.smu_i2c.algo) {
+               DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+               return 0;
+       }
+
+       /* There's a lot of repetition here. This is due to the FRU having
+        * variable-length fields. To get the information, we have to find the
+        * size of each field, and then keep reading along and reading along
+        * until we get all of the data that we want. We use addrptr to track
+        * the address as we go
+        */
+
+       /* The first fields are all of size 1-byte, from 0-7 are offsets that
+        * contain information that isn't useful to us.
+        * Bytes 8-a are all 1-byte and refer to the size of the entire struct,
+        * and the language field, so just start from 0xb, manufacturer size
+        */
+       addrptr = 0xb;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+               return size;
+       }
+
+       /* Increment the addrptr by the size of the field, and 1 due to the
+        * size field being 1 byte. This pattern continues below.
+        */
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+               return size;
+       }
+
+       /* Product name should only be 32 characters. Any more,
+        * and something could be wrong. Cap it at 32 to be safe
+        */
+       if (size > 32) {
+               DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
+               size = 32;
+       }
+       /* Start at 2 due to buff using fields 0 and 1 for the address */
+       memcpy(adev->product_name, &buff[2], size);
+       adev->product_name[size] = '\0';
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+               return size;
+       }
+
+       /* Product number should only be 16 characters. Any more,
+        * and something could be wrong. Cap it at 16 to be safe
+        */
+       if (size > 16) {
+               DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+               size = 16;
+       }
+       memcpy(adev->product_number, &buff[2], size);
+       adev->product_number[size] = '\0';
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+               return size;
+       }
+
+       addrptr += size + 1;
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+       if (size < 1) {
+               DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+               return size;
+       }
+
+       /* Serial number should only be 16 characters. Any more,
+        * and something could be wrong. Cap it at 16 to be safe
+        */
+       if (size > 16) {
+               DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+               size = 16;
+       }
+       memcpy(adev->serial, &buff[2], size);
+       adev->serial[size] = '\0';
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
new file mode 100644 (file)
index 0000000..968115c
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PRODINFO_H__
+#define __AMDGPU_PRODINFO_H__
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
+
+#endif  // __AMDGPU_PRODINFO_H__
index 4277125a79ee45ef61a8f9ad42f903e9b0a0fe7f..4ed9958af94e3b8d01416f7bfae93ef2e26244a5 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_debugfs.h>
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
+       struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
-       int r;
+       long r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
-       tv.num_shared = 1;
+       tv.num_shared = 2;
        list_add(&tv.head, &list);
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%d)\n", r);
+                       "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
-       if (bo_va && --bo_va->ref_count == 0) {
-               amdgpu_vm_bo_rmv(adev, bo_va);
-
-               if (amdgpu_vm_ready(vm)) {
-                       struct dma_fence *fence = NULL;
+       if (!bo_va || --bo_va->ref_count)
+               goto out_unlock;
 
-                       r = amdgpu_vm_clear_freed(adev, vm, &fence);
-                       if (unlikely(r)) {
-                               dev_err(adev->dev, "failed to clear page "
-                                       "tables on GEM object close (%d)\n", r);
-                       }
+       amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!amdgpu_vm_ready(vm))
+               goto out_unlock;
 
-                       if (fence) {
-                               amdgpu_bo_fence(bo, fence, true);
-                               dma_fence_put(fence);
-                       }
-               }
+       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       if (fence) {
+               amdgpu_bo_fence(bo, fence, true);
+               fence = NULL;
        }
+
+       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (r || !fence)
+               goto out_unlock;
+
+       amdgpu_bo_fence(bo, fence, true);
+       dma_fence_put(fence);
+
+out_unlock:
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
 }
 
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+                     AMDGPU_GEM_CREATE_ENCRYPTED))
 
                return -EINVAL;
 
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;
 
+       if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+               DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+               return -EINVAL;
+       }
+
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
        attachment = READ_ONCE(bo->tbo.base.import_attach);
 
        if (attachment)
-               seq_printf(m, " imported from %p", dma_buf);
+               seq_printf(m, " imported from %p%s", dma_buf,
+                          attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);
 
index 6b9c9193cdfa72df9434fd16ccaf4367cfa56fb9..d612033a23ac638d8b6e0a56cdb48cb63eeb62a8 100644 (file)
@@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
        return bit;
 }
 
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                                 int *mec, int *pipe, int *queue)
 {
        *queue = bit % adev->gfx.mec.num_queue_per_pipe;
@@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;
 
-               amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+               amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
 
                /*
                 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
        spin_lock_init(&kiq->ring_lock);
 
-       r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
-       if (r)
-               return r;
-
        ring->adev = NULL;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
@@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
                return r;
 
        ring->eop_gpu_addr = kiq->eop_gpu_addr;
+       ring->no_scheduler = true;
        sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
        r = amdgpu_ring_init(adev, ring, 1024,
-                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
 
@@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
 {
-       amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
        amdgpu_ring_fini(ring);
 }
 
@@ -488,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
        return amdgpu_ring_test_helper(kiq_ring);
 }
 
/*
 * Translate a queue-bitmap bit index into the bit position expected by the
 * KIQ SET_RESOURCES packet: 8 queues per pipe, 4 pipes per MEC.
 */
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit)
{
	int mec, pipe, queue;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	return mec * 32 + pipe * 8 + queue;
}
+
 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -510,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
                        break;
                }
 
-               queue_mask |= (1ull << i);
+               queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
        }
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
@@ -670,16 +680,23 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
        signed long r, cnt = 0;
        unsigned long flags;
-       uint32_t seq;
+       uint32_t seq, reg_val_offs = 0, value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
+       if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+               pr_err("critical bug! too many kiq readers\n");
+               goto failed_unlock;
+       }
        amdgpu_ring_alloc(ring, 32);
-       amdgpu_ring_emit_rreg(ring, reg);
-       amdgpu_fence_emit_polling(ring, &seq);
+       amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -705,9 +722,18 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;
 
-       return adev->wb.wb[kiq->reg_val_offs];
+       mb();
+       value = adev->wb.wb[reg_val_offs];
+       amdgpu_device_wb_free(adev, reg_val_offs);
+       return value;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+failed_unlock:
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_read:
+       if (reg_val_offs)
+               amdgpu_device_wb_free(adev, reg_val_offs);
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
 }
@@ -725,7 +751,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
-       amdgpu_fence_emit_polling(ring, &seq);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -754,6 +783,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        return;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
 }
index 5825692d07e42a33a31004bebd6b7ebcb4936b09..d43c11671a384ccc1507cd8b63060e86f43622d8 100644 (file)
@@ -103,7 +103,6 @@ struct amdgpu_kiq {
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
        const struct kiq_pm4_funcs *pmf;
-       uint32_t                        reg_val_offs;
 };
 
 /*
@@ -286,13 +285,8 @@ struct amdgpu_gfx {
        bool                            me_fw_write_wait;
        bool                            cp_fw_write_wait;
        struct amdgpu_ring              gfx_ring[AMDGPU_MAX_GFX_RINGS];
-       struct drm_gpu_scheduler        *gfx_sched[AMDGPU_MAX_GFX_RINGS];
-       uint32_t                        num_gfx_sched;
        unsigned                        num_gfx_rings;
        struct amdgpu_ring              compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
-       struct drm_gpu_scheduler        **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
-       struct drm_gpu_scheduler        *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
-       uint32_t                        num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
        unsigned                        num_compute_rings;
        struct amdgpu_irq_src           eop_irq;
        struct amdgpu_irq_src           priv_reg_irq;
@@ -370,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
 
 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
                                int pipe, int queue);
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                                 int *mec, int *pipe, int *queue);
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
                                     int pipe, int queue);
index 5884ab590486e6f941d67f931b34c4f0d2f9b01c..acabb57aa8af9e8007e49dc17b53071ed40f65eb 100644 (file)
@@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 /**
  * amdgpu_gmc_vram_location - try to find VRAM location
  *
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  * @base: base address at which to put VRAM
  *
  * Function will try to place VRAM at base address provided
@@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
 /**
  * amdgpu_gmc_gart_location - try to find GART location
  *
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  *
  * Function will place try to place GART before or after VRAM.
  *
@@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 
 /**
  * amdgpu_gmc_agp_location - try to find AGP location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
  *
  * Function will place try to find a place for the AGP BAR in the MC address
  * space.
@@ -373,3 +373,38 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 
        return 0;
 }
+
+/**
+ * amdgpu_tmz_set -- check and set if a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set if an the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_RAVEN:
+       case CHIP_RENOIR:
+       case CHIP_NAVI10:
+       case CHIP_NAVI14:
+       case CHIP_NAVI12:
+               /* Don't enable it by default yet.
+                */
+               if (amdgpu_tmz < 1) {
+                       adev->gmc.tmz_enabled = false;
+                       dev_info(adev->dev,
+                                "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+               } else {
+                       adev->gmc.tmz_enabled = true;
+                       dev_info(adev->dev,
+                                "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+               }
+               break;
+       default:
+               adev->gmc.tmz_enabled = false;
+               dev_warn(adev->dev,
+                        "Trusted Memory Zone (TMZ) feature not supported\n");
+               break;
+       }
+}
index 7546da0cc70c7019c94f58fb0ee66debcdc93a22..2bd9423c1dabbbd3106e98ff4b3c823b994e35bd 100644 (file)
@@ -213,6 +213,8 @@ struct amdgpu_gmc {
        } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
        uint64_t                last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
 
+       bool tmz_enabled;
+
        const struct amdgpu_gmc_funcs   *gmc_funcs;
 
        struct amdgpu_xgmi xgmi;
@@ -276,4 +278,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
 #endif
index ccbd7acfc4cb1be94259e19528cc271823305010..b91853fd66d375ee8fb13b7424e1e4d1ff1a9df7 100644 (file)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib)
+                 unsigned size, enum amdgpu_ib_pool_type pool_type,
+                 struct amdgpu_ib *ib)
 {
        int r;
 
        if (size) {
-               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+               r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
                                      &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -131,6 +132,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned fence_flags = 0;
+       bool secure;
 
        unsigned i;
        int r = 0;
@@ -159,6 +161,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return -EINVAL;
        }
 
+       if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+           (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+               dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+               return -EINVAL;
+       }
+
        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                ring->funcs->emit_ib_size;
 
@@ -181,6 +189,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                dma_fence_put(tmp);
        }
 
+       if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+               ring->funcs->emit_mem_sync(ring);
+
        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);
 
@@ -215,6 +226,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                amdgpu_ring_emit_cntxcntl(ring, status);
        }
 
+       /* Setup initial TMZiness and send it off.
+        */
+       secure = false;
+       if (job && ring->funcs->emit_frame_cntl) {
+               secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+               amdgpu_ring_emit_frame_cntl(ring, true, secure);
+       }
+
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
@@ -226,12 +245,20 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
+               if (job && ring->funcs->emit_frame_cntl) {
+                       if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+                               amdgpu_ring_emit_frame_cntl(ring, false, secure);
+                               secure = !secure;
+                               amdgpu_ring_emit_frame_cntl(ring, true, secure);
+                       }
+               }
+
                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }
 
-       if (ring->funcs->emit_tmz)
-               amdgpu_ring_emit_tmz(ring, false);
+       if (job && ring->funcs->emit_frame_cntl)
+               amdgpu_ring_emit_frame_cntl(ring, false, secure);
 
 #ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
@@ -280,22 +307,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-       int r;
+       unsigned size;
+       int r, i;
 
-       if (adev->ib_pool_ready) {
+       if (adev->ib_pool_ready)
                return 0;
-       }
-       r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
-                                     AMDGPU_IB_POOL_SIZE*64*1024,
-                                     AMDGPU_GPU_PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT);
-       if (r) {
-               return r;
-       }
 
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+               if (i == AMDGPU_IB_POOL_DIRECT)
+                       size = PAGE_SIZE * 2;
+               else
+                       size = AMDGPU_IB_POOL_SIZE;
+
+               r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+                                             size, AMDGPU_GPU_PAGE_SIZE,
+                                             AMDGPU_GEM_DOMAIN_GTT);
+               if (r)
+                       goto error;
+       }
        adev->ib_pool_ready = true;
 
        return 0;
+
+error:
+       while (i--)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       return r;
 }
 
 /**
@@ -308,10 +345,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
  */
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
-       if (adev->ib_pool_ready) {
-               amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
-               adev->ib_pool_ready = false;
-       }
+       int i;
+
+       if (!adev->ib_pool_ready)
+               return;
+
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+               amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+       adev->ib_pool_ready = false;
 }
 
 /**
@@ -326,9 +367,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
  */
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
-       unsigned i;
-       int r, ret = 0;
        long tmo_gfx, tmo_mm;
+       int r, ret = 0;
+       unsigned i;
 
        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
@@ -406,10 +447,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
 
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+       seq_printf(m, "--------------------- DELAYED --------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+                                    m);
+       seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+                                    m);
+       seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
 
        return 0;
-
 }
 
 static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
index 3a67f6c046d4d4579cfb63bcfd20f4f4e35027db..fe92dcd94d4ae6af2c7a1b27ab8709d4cdea51de 100644 (file)
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
            !dma_fence_is_later(updates, (*id)->flushed_updates))
            updates = NULL;
 
-       if ((*id)->owner != vm->direct.fence_context ||
+       if ((*id)->owner != vm->immediate.fence_context ||
            job->vm_pd_addr != (*id)->pd_gpu_addr ||
            updates || !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                struct dma_fence *flushed;
 
                /* Check all the prerequisites to using this VMID */
-               if ((*id)->owner != vm->direct.fence_context)
+               if ((*id)->owner != vm->immediate.fence_context)
                        continue;
 
                if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        }
 
        id->pd_gpu_addr = job->vm_pd_addr;
-       id->owner = vm->direct.fence_context;
+       id->owner = vm->immediate.fence_context;
 
        if (job->vm_needs_flush) {
                dma_fence_put(id->last_flush);
index 5ed4227f304bd49050139505622cdf2673878e8a..0cc4c67f95f721f7671d5e46824b0b0d5e70e7fa 100644 (file)
@@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
                nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
                if (nvec > 0) {
                        adev->irq.msi_enabled = true;
-                       dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+                       dev_dbg(adev->dev, "using MSI/MSI-X.\n");
                }
        }
 
index 4981e443a88473050e27559e4f43acc1c5a13d58..47207188c5692ad068217793a957471e13c3a035 100644 (file)
@@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
+       struct amdgpu_device *adev = ring->adev;
 
        memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
@@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
-       if (amdgpu_device_should_recover_gpu(ring->adev))
+       if (amdgpu_device_should_recover_gpu(ring->adev)) {
                amdgpu_device_gpu_recover(ring->adev, job);
-       else
+       } else {
                drm_sched_suspend_timeout(&ring->sched);
+               if (amdgpu_sriov_vf(adev))
+                       adev->virt.tdr_debug = true;
+       }
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job)
+               enum amdgpu_ib_pool_type pool_type,
+               struct amdgpu_job **job)
 {
        int r;
 
@@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
        if (r)
                return r;
 
-       r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+       r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
 
@@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
 {
-       enum drm_sched_priority priority;
        int r;
 
        if (!f)
@@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
-       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
        return 0;
index 3f7b8433d17904fd8e535219108a47f0f54c9e3d..81caac9b958a4a52fcdd9d0a06890ed4b6514eaf 100644 (file)
@@ -38,6 +38,7 @@
 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
 
 struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
 
 struct amdgpu_job {
        struct drm_sched_job    base;
@@ -61,14 +62,12 @@ struct amdgpu_job {
        /* user fence handling */
        uint64_t                uf_addr;
        uint64_t                uf_sequence;
-
 };
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job);
-
+               enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
index 5727f00afc8e6503647256cb171473eb0670aeda..d31d65e6b0398eaf6c20121f815b8fc4a88a1226 100644 (file)
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
        const unsigned ib_size_dw = 16;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
index bd9ef9cc86deae6c7183a6ccab20a33dbb869cce..5131a0a1bc8aa90c8c9dde5482ff466ee0ce8977 100644 (file)
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
        uint8_t num_jpeg_inst;
        struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
        struct amdgpu_jpeg_reg internal;
-       struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
-       uint32_t num_jpeg_sched;
        unsigned harvest_config;
        struct delayed_work idle_work;
        enum amd_powergating_state cur_state;
index a9086ea1ab60e9bd0047942a7fde7fbaa773d831..d7e17e34fee171a2d103fe79a2fc27ac9d68c95c 100644 (file)
@@ -183,18 +183,18 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        /* Call ACPI methods: require modeset init
         * but failure is not fatal
         */
-       if (!r) {
-               acpi_status = amdgpu_acpi_init(adev);
-               if (acpi_status)
-                       dev_dbg(&dev->pdev->dev,
-                               "Error during ACPI methods call\n");
-       }
+
+       acpi_status = amdgpu_acpi_init(adev);
+       if (acpi_status)
+               dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
 
        if (adev->runpm) {
-               dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+               /* only need to skip on ATPX */
+               if (amdgpu_device_supports_boco(dev) &&
+                   !amdgpu_is_atpx_hybrid())
+                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-               pm_runtime_set_active(dev->dev);
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put_autosuspend(dev->dev);
index 919bd566ba3cfc20670da24a2647ae1425f1c4dd..edaac242ff85708a8f57635791c954c8792c0dcc 100644 (file)
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
                                      u32 *flags);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
        void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
        void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
index c687f5415b3f1d03610f4904a42748ece428f618..3d822eba9a5d708b438e7a9cf90dbf8982422f95 100644 (file)
@@ -753,7 +753,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
        return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
                                  amdgpu_bo_size(shadow), NULL, fence,
-                                 true, false);
+                                 true, false, false);
 }
 
 /**
index 5e39ecd8cc28d099f31ba23e3dcbfa8d9fae515d..7d41f7b9a340574c3422e1ef9a570a27b51351cd 100644 (file)
@@ -229,6 +229,17 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+       return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 
index abe94a55ecad4bf5fb489669a1fe7d5d2fccfc8c..d7646cbce346e0894cacb74bf96d86a93c9f719b 100644 (file)
@@ -154,18 +154,15 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
  *
  */
 
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -189,19 +186,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf,
-                                   size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf,
+                                         size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type  state;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -294,18 +288,15 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
  *
  */
 
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
-                                               struct device_attribute *attr,
-                                                               char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+                                                           struct device_attribute *attr,
+                                                           char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level = 0xff;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -332,10 +323,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
                        "unknown");
 }
 
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
-                                                      struct device_attribute *attr,
-                                                      const char *buf,
-                                                      size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+                                                           struct device_attribute *attr,
+                                                           const char *buf,
+                                                           size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
@@ -343,9 +334,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
        enum amd_dpm_forced_level current_level = 0xff;
        int ret = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -383,6 +371,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                return count;
        }
 
+       if (adev->asic_type == CHIP_RAVEN) {
+               if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+                       if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+                               amdgpu_gfx_off_ctrl(adev, false);
+                       else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+                               amdgpu_gfx_off_ctrl(adev, true);
+               }
+       }
+
        /* profile_exit setting is valid only when current mode is in profile mode */
        if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
            AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -444,8 +441,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                ret = smu_get_power_num_states(&adev->smu, &data);
                if (ret)
                        return ret;
-       } else if (adev->powerplay.pp_funcs->get_pp_num_states)
+       } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
                amdgpu_dpm_get_pp_num_states(adev, &data);
+       } else {
+               memset(&data, 0, sizeof(data));
+       }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -472,9 +472,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -511,9 +508,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
@@ -531,9 +525,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        unsigned long idx;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
        else if (is_support_sw_smu(adev))
@@ -589,9 +580,6 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        char *table = NULL;
        int size, ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -631,9 +619,6 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        int ret = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -736,9 +721,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
        const char delimiter[3] = {' ', '\n', '\0'};
        uint32_t type;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
-
        if (count > 127)
                return -EINVAL;
 
@@ -828,9 +810,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -870,19 +849,16 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the corresponding bit from original ppfeature masks and input the
  * new ppfeature masks.
  */
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf,
-               size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf,
+                                     size_t count)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t featuremask;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
-
        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
                return -EINVAL;
@@ -914,18 +890,15 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
        return count;
 }
 
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -982,9 +955,6 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1048,9 +1018,6 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
        int ret;
        uint32_t mask = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1082,9 +1049,6 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1112,9 +1076,6 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
        uint32_t mask = 0;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-                       return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1146,9 +1107,6 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1176,9 +1134,6 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
        int ret;
        uint32_t mask = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1212,9 +1167,6 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1242,9 +1194,6 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
        int ret;
        uint32_t mask = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1278,9 +1227,6 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1308,9 +1254,6 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
        int ret;
        uint32_t mask = 0;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1344,9 +1287,6 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1374,9 +1314,6 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
        int ret;
        uint32_t mask = 0;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
@@ -1410,9 +1347,6 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
        uint32_t value = 0;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1438,9 +1372,6 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
        int ret;
        long int value;
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
-
        ret = kstrtol(buf, 0, &value);
 
        if (ret)
@@ -1479,9 +1410,6 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
        uint32_t value = 0;
        int ret;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1507,9 +1435,6 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
        int ret;
        long int value;
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        ret = kstrtol(buf, 0, &value);
 
        if (ret)
@@ -1568,9 +1493,6 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
        ssize_t size;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;
@@ -1612,9 +1534,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
        if (ret)
                return -EINVAL;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return -EINVAL;
-
        if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
                if (count < 2 || count > 127)
                        return -EINVAL;
@@ -1660,17 +1579,14 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int r, value, size = sizeof(value);
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0)
                return r;
@@ -1696,17 +1612,14 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int r, value, size = sizeof(value);
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0)
                return r;
@@ -1742,11 +1655,14 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
-       uint64_t count0, count1;
+       uint64_t count0 = 0, count1 = 0;
        int ret;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
+       if (adev->flags & AMD_IS_APU)
+               return -ENODATA;
+
+       if (!adev->asic_funcs->get_pcie_usage)
+               return -ENODATA;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
@@ -1778,66 +1694,191 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        if (adev->unique_id)
                return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
 
        return 0;
 }
 
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
-                  amdgpu_get_dpm_forced_performance_level,
-                  amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_force_state,
-               amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_table,
-               amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_sclk,
-               amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_mclk,
-               amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_socclk,
-               amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_fclk,
-               amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_dcefclk,
-               amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_dpm_pcie,
-               amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_sclk_od,
-               amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_mclk_od,
-               amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_power_profile_mode,
-               amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_od_clk_voltage,
-               amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
-               amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
-               amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
-               amdgpu_get_pp_feature_status,
-               amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+       AMDGPU_DEVICE_ATTR_RW(power_dpm_state,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RO(pp_num_states,                            ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(pp_cur_state,                             ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_force_state,                           ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_table,                                 ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,                            ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,                               ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,                               ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,                    ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,                        ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,                         ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,                         ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(pcie_bw,                                  ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_features,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(unique_id,                                ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                              uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+       struct device_attribute *dev_attr = &attr->dev_attr;
+       const char *attr_name = dev_attr->attr.name;
+       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+       enum amd_asic_type asic_type = adev->asic_type;
+
+       if (!(attr->flags & mask)) {
+               *states = ATTR_STATE_UNSUPPORTED;
+               return 0;
+       }
+
+#define DEVICE_ATTR_IS(_name)  (!strcmp(attr_name, #_name))
+
+       if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+               if (asic_type < CHIP_VEGA10)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+               if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+               if (asic_type < CHIP_VEGA20)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+               if (asic_type == CHIP_ARCTURUS)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+               *states = ATTR_STATE_UNSUPPORTED;
+               if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+                   (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+                       *states = ATTR_STATE_SUPPORTED;
+       } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+               if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pcie_bw)) {
+               /* PCIe Perf counters won't work on APU nodes */
+               if (adev->flags & AMD_IS_APU)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(unique_id)) {
+               if (!adev->unique_id)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_features)) {
+               if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       }
+
+       if (asic_type == CHIP_ARCTURUS) {
+               /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+               if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+                   DEVICE_ATTR_IS(pp_dpm_socclk) ||
+                   DEVICE_ATTR_IS(pp_dpm_fclk)) {
+                       dev_attr->attr.mode &= ~S_IWUGO;
+                       dev_attr->store = NULL;
+               }
+       }
+
+#undef DEVICE_ATTR_IS
+
+       return 0;
+}
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+                                    struct amdgpu_device_attr *attr,
+                                    uint32_t mask, struct list_head *attr_list)
+{
+       int ret = 0;
+       struct device_attribute *dev_attr = &attr->dev_attr;
+       const char *name = dev_attr->attr.name;
+       enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+       struct amdgpu_device_attr_entry *attr_entry;
+
+       int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                          uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+       BUG_ON(!attr);
+
+       attr_update = attr->attr_update ? attr_update : default_attr_update;
+
+       ret = attr_update(adev, attr, mask, &attr_states);
+       if (ret) {
+               dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+                       name, ret);
+               return ret;
+       }
+
+       if (attr_states == ATTR_STATE_UNSUPPORTED)
+               return 0;
+
+       ret = device_create_file(adev->dev, dev_attr);
+       if (ret) {
+               dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+                       name, ret);
+       }
+
+       attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+       if (!attr_entry)
+               return -ENOMEM;
+
+       attr_entry->attr = attr;
+       INIT_LIST_HEAD(&attr_entry->entry);
+
+       list_add_tail(&attr_entry->entry, attr_list);
+
+       return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+       struct device_attribute *dev_attr = &attr->dev_attr;
+
+       device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+                                            struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+                                           struct amdgpu_device_attr *attrs,
+                                           uint32_t counts,
+                                           uint32_t mask,
+                                           struct list_head *attr_list)
+{
+       int ret = 0;
+       uint32_t i = 0;
+
+       for (i = 0; i < counts; i++) {
+               ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+               if (ret)
+                       goto failed;
+       }
+
+       return 0;
+
+failed:
+       amdgpu_device_attr_remove_groups(adev, attr_list);
+
+       return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+                                            struct list_head *attr_list)
+{
+       struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+       if (list_empty(attr_list))
+               return ;
+
+       list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+               amdgpu_device_attr_remove(adev, entry->attr);
+               list_del(&entry->entry);
+               kfree(entry);
+       }
+}
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
@@ -3238,8 +3279,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret;
+       uint32_t mask = 0;
 
        if (adev->pm.sysfs_initialized)
                return 0;
@@ -3247,6 +3288,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
        if (adev->pm.dpm_enabled == 0)
                return 0;
 
+       INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
        adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
                                                                   DRIVER_NAME, adev,
                                                                   hwmon_groups);
@@ -3257,160 +3300,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                return ret;
        }
 
-       ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file for dpm state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-       if (ret) {
-               DRM_ERROR("failed to create device file for dpm state\n");
-               return ret;
-       }
-
-
-       ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_num_states\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_cur_state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_force_state\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_table);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_table\n");
-               return ret;
-       }
-
-       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-               return ret;
-       }
-
-       /* Arcturus does not support standalone mclk/socclk/fclk level setting */
-       if (adev->asic_type == CHIP_ARCTURUS) {
-               dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_mclk.store = NULL;
-
-               dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_socclk.store = NULL;
-
-               dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
-               dev_attr_pp_dpm_fclk.store = NULL;
+       switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+       case SRIOV_VF_MODE_ONE_VF:
+               mask = ATTR_FLAG_ONEVF;
+               break;
+       case SRIOV_VF_MODE_MULTI_VF:
+               mask = 0;
+               break;
+       case SRIOV_VF_MODE_BARE_METAL:
+       default:
+               mask = ATTR_FLAG_MASK_ALL;
+               break;
        }
 
-       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-               return ret;
-       }
-       if (adev->asic_type >= CHIP_VEGA10) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_socclk\n");
-                       return ret;
-               }
-               if (adev->asic_type != CHIP_ARCTURUS) {
-                       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-                       if (ret) {
-                               DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-                               return ret;
-                       }
-               }
-       }
-       if (adev->asic_type >= CHIP_VEGA20) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_fclk\n");
-                       return ret;
-               }
-       }
-       if (adev->asic_type != CHIP_ARCTURUS) {
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-                       return ret;
-               }
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_sclk_od\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
-       if (ret) {
-               DRM_ERROR("failed to create device file pp_mclk_od\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev,
-                       &dev_attr_pp_power_profile_mode);
-       if (ret) {
-               DRM_ERROR("failed to create device file "
-                               "pp_power_profile_mode\n");
-               return ret;
-       }
-       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-           (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_pp_od_clk_voltage);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "pp_od_clk_voltage\n");
-                       return ret;
-               }
-       }
-       ret = device_create_file(adev->dev,
-                       &dev_attr_gpu_busy_percent);
-       if (ret) {
-               DRM_ERROR("failed to create device file "
-                               "gpu_busy_level\n");
-               return ret;
-       }
-       /* APU does not have its own dedicated memory */
-       if (!(adev->flags & AMD_IS_APU) &&
-            (adev->asic_type != CHIP_VEGA10)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_mem_busy_percent);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "mem_busy_percent\n");
-                       return ret;
-               }
-       }
-       /* PCIe Perf counters won't work on APU nodes */
-       if (!(adev->flags & AMD_IS_APU)) {
-               ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pcie_bw\n");
-                       return ret;
-               }
-       }
-       if (adev->unique_id)
-               ret = device_create_file(adev->dev, &dev_attr_unique_id);
-       if (ret) {
-               DRM_ERROR("failed to create device file unique_id\n");
+       ret = amdgpu_device_attr_create_groups(adev,
+                                              amdgpu_device_attrs,
+                                              ARRAY_SIZE(amdgpu_device_attrs),
+                                              mask,
+                                              &adev->pm.pm_attr_list);
+       if (ret)
                return ret;
-       }
-
-       if ((adev->asic_type >= CHIP_VEGA10) &&
-           !(adev->flags & AMD_IS_APU)) {
-               ret = device_create_file(adev->dev,
-                               &dev_attr_pp_features);
-               if (ret) {
-                       DRM_ERROR("failed to create device file "
-                                       "pp_features\n");
-                       return ret;
-               }
-       }
 
        adev->pm.sysfs_initialized = true;
 
@@ -3419,51 +3328,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 {
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
        if (adev->pm.dpm_enabled == 0)
                return;
 
        if (adev->pm.int_hwmon_dev)
                hwmon_device_unregister(adev->pm.int_hwmon_dev);
-       device_remove_file(adev->dev, &dev_attr_power_dpm_state);
-       device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
-       device_remove_file(adev->dev, &dev_attr_pp_num_states);
-       device_remove_file(adev->dev, &dev_attr_pp_cur_state);
-       device_remove_file(adev->dev, &dev_attr_pp_force_state);
-       device_remove_file(adev->dev, &dev_attr_pp_table);
-
-       device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-       device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-       if (adev->asic_type >= CHIP_VEGA10) {
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-               if (adev->asic_type != CHIP_ARCTURUS)
-                       device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-       }
-       if (adev->asic_type != CHIP_ARCTURUS)
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
-       if (adev->asic_type >= CHIP_VEGA20)
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
-       device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
-       device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
-       device_remove_file(adev->dev,
-                       &dev_attr_pp_power_profile_mode);
-       if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-           (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-               device_remove_file(adev->dev,
-                               &dev_attr_pp_od_clk_voltage);
-       device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-       if (!(adev->flags & AMD_IS_APU) &&
-            (adev->asic_type != CHIP_VEGA10))
-               device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
-       if (!(adev->flags & AMD_IS_APU))
-               device_remove_file(adev->dev, &dev_attr_pcie_bw);
-       if (adev->unique_id)
-               device_remove_file(adev->dev, &dev_attr_unique_id);
-       if ((adev->asic_type >= CHIP_VEGA10) &&
-           !(adev->flags & AMD_IS_APU))
-               device_remove_file(adev->dev, &dev_attr_pp_features);
+
+       amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
index 5db0ef86e84cfd33e7b430e1cb073a5570135e55..d9ae2b49a402fef3b11e425d57d41fe74ab48f7a 100644 (file)
@@ -30,6 +30,55 @@ struct cg_flag_name
        const char *name;
 };
 
+enum amdgpu_device_attr_flags {
+       ATTR_FLAG_BASIC = (1 << 0),
+       ATTR_FLAG_ONEVF = (1 << 16),
+};
+
+#define ATTR_FLAG_TYPE_MASK    (0x0000ffff)
+#define ATTR_FLAG_MODE_MASK    (0xffff0000)
+#define ATTR_FLAG_MASK_ALL     (0xffffffff)
+
+enum amdgpu_device_attr_states {
+       ATTR_STATE_UNSUPPORTED = 0,
+       ATTR_STATE_SUPPORTED,
+};
+
+struct amdgpu_device_attr {
+       struct device_attribute dev_attr;
+       enum amdgpu_device_attr_flags flags;
+       int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                          uint32_t mask, enum amdgpu_device_attr_states *states);
+
+};
+
+struct amdgpu_device_attr_entry {
+       struct list_head entry;
+       struct amdgpu_device_attr *attr;
+};
+
+#define to_amdgpu_device_attr(_dev_attr) \
+       container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
+
+#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
+       { .dev_attr = __ATTR(_name, _mode, _show, _store),              \
+         .flags = _flags,                                              \
+         ##__VA_ARGS__, }
+
+#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...)                  \
+       __AMDGPU_DEVICE_ATTR(_name, _mode,                              \
+                            amdgpu_get_##_name, amdgpu_set_##_name,    \
+                            _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...)                      \
+       AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR,                    \
+                          _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...)                      \
+       __AMDGPU_DEVICE_ATTR(_name, S_IRUGO,                            \
+                            amdgpu_get_##_name, NULL,                  \
+                            _flags, ##__VA_ARGS__)
+
 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
 int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
index deaa26808841609569d173a8f56f20e4d24aca16..7301fdcfb8bce2ded20f147dbd80111af2349858 100644 (file)
 
 #include "amdgpu_ras.h"
 
-static void psp_set_funcs(struct amdgpu_device *adev);
-
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
 
+static int psp_load_smu_fw(struct psp_context *psp);
+
 /*
  * Due to DF Cstate management centralized to PMFW, the firmware
  * loading sequence will be updated as below:
@@ -80,8 +80,6 @@ static int psp_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
 
-       psp_set_funcs(adev);
-
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
@@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
        int index;
        int timeout = 2000;
        bool ras_intr = false;
+       bool skip_unsupport = false;
 
        mutex_lock(&psp->mutex);
 
@@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
                amdgpu_asic_invalidate_hdp(psp->adev, NULL);
        }
 
+       /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
+       skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
+
        /* In some cases, psp response status is not 0 even there is no
         * problem while the command is submitted. Some version of PSP FW
         * doesn't write 0 to that field.
@@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
         * during psp initialization to avoid breaking hw_init and it doesn't
         * return -EINVAL.
         */
-       if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+       if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
                if (ucode)
                        DRM_WARN("failed to load ucode id (%d) ",
                                  ucode->ucode_id);
@@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
                                 struct psp_gfx_cmd_resp *cmd,
                                 uint64_t tmr_mc, uint32_t size)
 {
-       if (psp_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(psp->adev))
                cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
        else
                cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
@@ -662,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
        return ret;
 }
 
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+       /* Invoke xgmi ta to get hive id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return ret;
+
+       *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+       return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       int ret;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+       /* Invoke xgmi ta to get the node id */
+       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+       if (ret)
+               return ret;
+
+       *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+       return 0;
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+       int i;
+       int ret;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       /* Fill in the shared memory with topology information as input */
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to get the topology information */
+       ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+       if (ret)
+               return ret;
+
+       /* Read the output topology information from the shared memory */
+       topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+       topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+       for (i = 0; i < topology->num_nodes; i++) {
+               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+       }
+
+       return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology)
+{
+       struct ta_xgmi_shared_memory *xgmi_cmd;
+       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+       int i;
+
+       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+               return -EINVAL;
+
+       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+       topology_info_input->num_nodes = number_devices;
+
+       for (i = 0; i < topology_info_input->num_nodes; i++) {
+               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+               topology_info_input->nodes[i].is_sharing_enabled = 1;
+               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+       }
+
+       /* Invoke xgmi ta to set topology information */
+       return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
 // ras begin
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
@@ -744,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
+       struct ta_ras_shared_memory *ras_cmd;
+       int ret;
+
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
        /*
         * TODO: bypass the loading in sriov for now
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+
+       if (amdgpu_ras_intr_triggered())
+               return ret;
+
+       if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
+       {
+               DRM_WARN("RAS: Unsupported Interface");
+               return -EINVAL;
+       }
+
+       if (!ret) {
+               if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+                       dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+                       ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+               }
+               else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+                       dev_warn(psp->adev->dev,
+                                "RAS internal register access blocked\n");
+       }
+
+       return ret;
 }
 
 int psp_ras_enable_features(struct psp_context *psp,
@@ -834,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
 
        return 0;
 }
+
+int psp_ras_trigger_error(struct psp_context *psp,
+                         struct ta_ras_trigger_error_input *info)
+{
+       struct ta_ras_shared_memory *ras_cmd;
+       int ret;
+
+       if (!psp->ras.ras_initialized)
+               return -EINVAL;
+
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+       ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+       ras_cmd->ras_in_message.trigger_error = *info;
+
+       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+       if (ret)
+               return -EINVAL;
+
+       /* If err_event_athub occurs, error injection was successful; however
+          the return status from the TA is no longer reliable */
+       if (amdgpu_ras_intr_triggered())
+               return 0;
+
+       return ras_cmd->ras_status;
+}
 // ras end
 
 // HDCP start
@@ -884,6 +1055,7 @@ static int psp_hdcp_load(struct psp_context *psp)
        if (!ret) {
                psp->hdcp_context.hdcp_initialized = true;
                psp->hdcp_context.session_id = cmd->resp.session_id;
+               mutex_init(&psp->hdcp_context.mutex);
        }
 
        kfree(cmd);
@@ -1029,6 +1201,7 @@ static int psp_dtm_load(struct psp_context *psp)
        if (!ret) {
                psp->dtm_context.dtm_initialized = true;
                psp->dtm_context.session_id = cmd->resp.session_id;
+               mutex_init(&psp->dtm_context.mutex);
        }
 
        kfree(cmd);
@@ -1169,16 +1342,20 @@ static int psp_hw_start(struct psp_context *psp)
        }
 
        /*
-        * For those ASICs with DF Cstate management centralized
+        * For ASICs with DF Cstate management centralized
         * to PMFW, TMR setup should be performed after PMFW
         * loaded and before other non-psp firmware loaded.
         */
-       if (!psp->pmfw_centralized_cstate_management) {
-               ret = psp_tmr_load(psp);
-               if (ret) {
-                       DRM_ERROR("PSP load tmr failed!\n");
+       if (psp->pmfw_centralized_cstate_management) {
+               ret = psp_load_smu_fw(psp);
+               if (ret)
                        return ret;
-               }
+       }
+
+       ret = psp_tmr_load(psp);
+       if (ret) {
+               DRM_ERROR("PSP load tmr failed!\n");
+               return ret;
        }
 
        return 0;
@@ -1355,7 +1532,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
 }
 
 static int psp_execute_np_fw_load(struct psp_context *psp,
-                              struct amdgpu_firmware_info *ucode)
+                                 struct amdgpu_firmware_info *ucode)
 {
        int ret = 0;
 
@@ -1369,64 +1546,96 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
        return ret;
 }
 
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+       int ret;
+       struct amdgpu_device* adev = psp->adev;
+       struct amdgpu_firmware_info *ucode =
+                       &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+       struct amdgpu_ras *ras = psp->ras.ras;
+
+       if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+               return 0;
+
+
+       if (adev->in_gpu_reset && ras && ras->supported) {
+               ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+               if (ret) {
+                       DRM_WARN("Failed to set MP1 state prepare for reload\n");
+               }
+       }
+
+       ret = psp_execute_np_fw_load(psp, ucode);
+
+       if (ret)
+               DRM_ERROR("PSP load smu failed!\n");
+
+       return ret;
+}
+
+static bool fw_load_skip_check(struct psp_context *psp,
+                              struct amdgpu_firmware_info *ucode)
+{
+       if (!ucode->fw)
+               return true;
+
+       if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+           (psp_smu_reload_quirk(psp) ||
+            psp->autoload_supported ||
+            psp->pmfw_centralized_cstate_management))
+               return true;
+
+       if (amdgpu_sriov_vf(psp->adev) &&
+          (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+           || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+           || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
+               /* skip ucode loading in SRIOV VF */
+               return true;
+
+       if (psp->autoload_supported &&
+           (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+            ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+               /* skip mec JT when autoload is enabled */
+               return true;
+
+       return false;
+}
+
 static int psp_np_fw_load(struct psp_context *psp)
 {
        int i, ret;
        struct amdgpu_firmware_info *ucode;
        struct amdgpu_device* adev = psp->adev;
 
-       if (psp->autoload_supported ||
-           psp->pmfw_centralized_cstate_management) {
-               ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-               if (!ucode->fw || amdgpu_sriov_vf(adev))
-                       goto out;
-
-               ret = psp_execute_np_fw_load(psp, ucode);
+       if (psp->autoload_supported &&
+           !psp->pmfw_centralized_cstate_management) {
+               ret = psp_load_smu_fw(psp);
                if (ret)
                        return ret;
        }
 
-       if (psp->pmfw_centralized_cstate_management) {
-               ret = psp_tmr_load(psp);
-               if (ret) {
-                       DRM_ERROR("PSP load tmr failed!\n");
-                       return ret;
-               }
-       }
-
-out:
        for (i = 0; i < adev->firmware.max_ucodes; i++) {
                ucode = &adev->firmware.ucode[i];
-               if (!ucode->fw)
-                       continue;
 
                if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
-                   (psp_smu_reload_quirk(psp) ||
-                    psp->autoload_supported ||
-                    psp->pmfw_centralized_cstate_management))
-                       continue;
-
-               if (amdgpu_sriov_vf(adev) &&
-                  (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
-                    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
-                   || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
-                       /*skip ucode loading in SRIOV VF */
+                   !fw_load_skip_check(psp, ucode)) {
+                       ret = psp_load_smu_fw(psp);
+                       if (ret)
+                               return ret;
                        continue;
+               }
 
-               if (psp->autoload_supported &&
-                   (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
-                    ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
-                       /* skip mec JT when autoload is enabled */
+               if (fw_load_skip_check(psp, ucode))
                        continue;
 
                psp_print_fw_hdr(psp, ucode);
@@ -1438,17 +1647,12 @@ out:
                /* Start rlc autoload after psp recieved all the gfx firmware */
                if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
                    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
-                       ret = psp_rlc_autoload(psp);
+                       ret = psp_rlc_autoload_start(psp);
                        if (ret) {
                                DRM_ERROR("Failed to start rlc autoload\n");
                                return ret;
                        }
                }
-#if 0
-               /* check if firmware loaded sucessfully */
-               if (!amdgpu_psp_check_fw_loading_status(adev, i))
-                       return -EINVAL;
-#endif
        }
 
        return 0;
@@ -1806,19 +2010,110 @@ int psp_ring_cmd_submit(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
-                                       enum AMDGPU_UCODE_ID ucode_type)
+int psp_init_asd_microcode(struct psp_context *psp,
+                          const char *chip_name)
 {
-       struct amdgpu_firmware_info *ucode = NULL;
+       struct amdgpu_device *adev = psp->adev;
+       char fw_name[30];
+       const struct psp_firmware_header_v1_0 *asd_hdr;
+       int err = 0;
 
-       if (!adev->firmware.fw_size)
-               return false;
+       if (!chip_name) {
+               dev_err(adev->dev, "invalid chip name for asd microcode\n");
+               return -EINVAL;
+       }
 
-       ucode = &adev->firmware.ucode[ucode_type];
-       if (!ucode->fw || !ucode->ucode_size)
-               return false;
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->psp.asd_fw);
+       if (err)
+               goto out;
+
+       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+       return 0;
+out:
+       dev_err(adev->dev, "fail to initialize asd microcode\n");
+       release_firmware(adev->psp.asd_fw);
+       adev->psp.asd_fw = NULL;
+       return err;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp,
+                          const char *chip_name)
+{
+       struct amdgpu_device *adev = psp->adev;
+       char fw_name[30];
+       const struct psp_firmware_header_v1_0 *sos_hdr;
+       const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+       const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+       int err = 0;
+
+       if (!chip_name) {
+               dev_err(adev->dev, "invalid chip name for sos microcode\n");
+               return -EINVAL;
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
+       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       if (err)
+               goto out;
+
+       sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+       amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
+       switch (sos_hdr->header.header_version_major) {
+       case 1:
+               adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+               adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+               adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+               adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
+               adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+                               le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+               adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                               le32_to_cpu(sos_hdr->sos_offset_bytes);
+               if (sos_hdr->header.header_version_minor == 1) {
+                       sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+                       adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+                       adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                       le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                       le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+               }
+               if (sos_hdr->header.header_version_minor == 2) {
+                       sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+                                                   le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+               }
+               break;
+       default:
+               dev_err(adev->dev,
+                       "unsupported psp sos firmware\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       return 0;
+out:
+       dev_err(adev->dev,
+               "failed to init sos firmware\n");
+       release_firmware(adev->psp.sos_fw);
+       adev->psp.sos_fw = NULL;
 
-       return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+       return err;
 }
 
 static int psp_set_clockgating_state(void *handle,
@@ -1957,16 +2252,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
        device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
 }
 
-static const struct amdgpu_psp_funcs psp_funcs = {
-       .check_fw_loading_status = psp_check_fw_loading_status,
-};
-
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
-       if (NULL == adev->firmware.funcs)
-               adev->firmware.funcs = &psp_funcs;
-}
-
 const struct amdgpu_ip_block_version psp_v3_1_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_PSP,
index 297435c0c7c1ad630b16eebdc29ccb7aefb04f4d..2a56ad996d834ec0a1923dba90e28bb0ed83413e 100644 (file)
@@ -93,22 +93,8 @@ struct psp_funcs
                            enum psp_ring_type ring_type);
        int (*ring_destroy)(struct psp_context *psp,
                            enum psp_ring_type ring_type);
-       bool (*compare_sram_data)(struct psp_context *psp,
-                                 struct amdgpu_firmware_info *ucode,
-                                 enum AMDGPU_UCODE_ID ucode_type);
        bool (*smu_reload_quirk)(struct psp_context *psp);
        int (*mode1_reset)(struct psp_context *psp);
-       int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
-       int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
-       int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
-                                     struct psp_xgmi_topology_info *topology);
-       int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
-                                     struct psp_xgmi_topology_info *topology);
-       bool (*support_vmr_ring)(struct psp_context *psp);
-       int (*ras_trigger_error)(struct psp_context *psp,
-                       struct ta_ras_trigger_error_input *info);
-       int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
-       int (*rlc_autoload_start)(struct psp_context *psp);
        int (*mem_training_init)(struct psp_context *psp);
        void (*mem_training_fini)(struct psp_context *psp);
        int (*mem_training)(struct psp_context *psp, uint32_t ops);
@@ -161,6 +147,7 @@ struct psp_hdcp_context {
        struct amdgpu_bo        *hdcp_shared_bo;
        uint64_t                hdcp_shared_mc_addr;
        void                    *hdcp_shared_buf;
+       struct mutex            mutex;
 };
 
 struct psp_dtm_context {
@@ -169,6 +156,7 @@ struct psp_dtm_context {
        struct amdgpu_bo        *dtm_shared_bo;
        uint64_t                dtm_shared_mc_addr;
        void                    *dtm_shared_buf;
+       struct mutex            mutex;
 };
 
 #define MEM_TRAIN_SYSTEM_SIGNATURE             0x54534942
@@ -306,8 +294,6 @@ struct amdgpu_psp_funcs {
 #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
 #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
 #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_compare_sram_data(psp, ucode, type) \
-               (psp)->funcs->compare_sram_data((psp), (ucode), (type))
 #define psp_init_microcode(psp) \
                ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
 #define psp_bootloader_load_kdb(psp) \
@@ -318,22 +304,8 @@ struct amdgpu_psp_funcs {
                ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
 #define psp_smu_reload_quirk(psp) \
                ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
-#define psp_support_vmr_ring(psp) \
-               ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
 #define psp_mode1_reset(psp) \
                ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp, node_id) \
-               ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
-#define psp_xgmi_get_hive_id(psp, hive_id) \
-               ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
-#define psp_xgmi_get_topology_info(psp, num_device, topology) \
-               ((psp)->funcs->xgmi_get_topology_info ? \
-               (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_xgmi_set_topology_info(psp, num_device, topology) \
-               ((psp)->funcs->xgmi_set_topology_info ?  \
-               (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_rlc_autoload(psp) \
-               ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
 #define psp_mem_training_init(psp) \
        ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
 #define psp_mem_training_fini(psp) \
@@ -341,15 +313,6 @@ struct amdgpu_psp_funcs {
 #define psp_mem_training(psp, ops) \
        ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
 
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
-
-#define psp_ras_trigger_error(psp, info) \
-       ((psp)->funcs->ras_trigger_error ? \
-       (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
-#define psp_ras_cure_posion(psp, addr) \
-       ((psp)->funcs->ras_cure_posion ? \
-       (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
-
 #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
 #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
 
@@ -377,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 int psp_xgmi_initialize(struct psp_context *psp);
 int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+                              int number_devices,
+                              struct psp_xgmi_topology_info *topology);
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_ras_enable_features(struct psp_context *psp,
                union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+                         struct ta_ras_trigger_error_input *info);
+
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 
@@ -393,4 +367,8 @@ int psp_ring_cmd_submit(struct psp_context *psp,
                        uint64_t cmd_buf_mc_addr,
                        uint64_t fence_mc_addr,
                        int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+                          const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+                          const char *chip_name);
 #endif
index ab379b44679cc50f862a1e680c482f03f07278ab..50fe08bf2f727656d8680fa998795d490d6df218 100644 (file)
@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
 
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+       if (adev && amdgpu_ras_get_context(adev))
+               amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+       if (adev && amdgpu_ras_get_context(adev))
+               return amdgpu_ras_get_context(adev)->error_query_ready;
+
+       return false;
+}
+
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
 {
@@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
        struct ras_debug_if data;
        int ret = 0;
 
-       if (amdgpu_ras_intr_triggered()) {
-               DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+       if (!amdgpu_ras_get_error_query_ready(adev)) {
+               dev_warn(adev->dev, "RAS WARN: error injection "
+                               "currently inaccessible\n");
                return size;
        }
 
@@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
-                       DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+                       dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+                                       "as bad before error injection!\n",
                                        data.inject.address);
                        break;
                }
@@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                .head = obj->head,
        };
 
-       if (amdgpu_ras_intr_triggered())
+       if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return snprintf(buf, PAGE_SIZE,
                                "Query currently inaccessible\n");
 
@@ -486,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 }
 /* obj end */
 
+void amdgpu_ras_parse_status_code(struct amdgpu_device* adev,
+                                 const char*           invoke_type,
+                                 const char*           block_name,
+                                 enum ta_ras_status    ret)
+{
+       switch (ret) {
+       case TA_RAS_STATUS__SUCCESS:
+               return;
+       case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+               dev_warn(adev->dev,
+                       "RAS WARN: %s %s currently unavailable\n",
+                       invoke_type,
+                       block_name);
+               break;
+       default:
+               dev_err(adev->dev,
+                       "RAS ERROR: %s %s error failed ret 0x%X\n",
+                       invoke_type,
+                       block_name,
+                       ret);
+       }
+}
+
 /* feature ctl begin */
 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                struct ras_common_if *head)
@@ -549,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       union ta_ras_cmd_input info;
+       union ta_ras_cmd_input *info;
        int ret;
 
        if (!con)
                return -EINVAL;
 
+       info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
        if (!enable) {
-               info.disable_features = (struct ta_ras_disable_features_input) {
+               info->disable_features = (struct ta_ras_disable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        } else {
-               info.enable_features = (struct ta_ras_enable_features_input) {
+               info->enable_features = (struct ta_ras_enable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
@@ -570,26 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
        /* Do not enable if it is not allowed. */
        WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
        /* Are we alerady in that state we are going to set? */
-       if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
-               return 0;
+       if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+               ret = 0;
+               goto out;
+       }
 
        if (!amdgpu_ras_intr_triggered()) {
-               ret = psp_ras_enable_features(&adev->psp, &info, enable);
+               ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
-                       DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
-                                       enable ? "enable":"disable",
-                                       ras_block_str(head->block),
-                                       ret);
+                       amdgpu_ras_parse_status_code(adev,
+                                                    enable ? "enable":"disable",
+                                                    ras_block_str(head->block),
+                                                   (enum ta_ras_status)ret);
                        if (ret == TA_RAS_STATUS__RESET_NEEDED)
-                               return -EAGAIN;
-                       return -EINVAL;
+                               ret = -EAGAIN;
+                       else
+                               ret = -EINVAL;
+
+                       goto out;
                }
        }
 
        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);
-
-       return 0;
+       ret = 0;
+out:
+       kfree(info);
+       return ret;
 }
 
 /* Only used in device probe stage and called only once. */
@@ -618,7 +668,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
-                                       DRM_INFO("RAS INFO: %s setup object\n",
+                                       dev_info(adev->dev,
+                                               "RAS INFO: %s setup object\n",
                                                ras_block_str(head->block));
                        }
                } else {
@@ -744,17 +795,48 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
        info->ce_count = obj->err_data.ce_count;
 
        if (err_data.ce_count) {
-               dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
-                        obj->err_data.ce_count, ras_block_str(info->head.block));
+               dev_info(adev->dev, "%ld correctable hardware errors "
+                                       "detected in %s block, no user "
+                                       "action is needed.\n",
+                                       obj->err_data.ce_count,
+                                       ras_block_str(info->head.block));
        }
        if (err_data.ue_count) {
-               dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
-                        obj->err_data.ue_count, ras_block_str(info->head.block));
+               dev_info(adev->dev, "%ld uncorrectable hardware errors "
+                                       "detected in %s block\n",
+                                       obj->err_data.ue_count,
+                                       ras_block_str(info->head.block));
        }
 
        return 0;
 }
 
+/* Trigger XGMI/WAFL error */
+int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+                                struct ta_ras_trigger_error_input *block_info)
+{
+       int ret;
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to disallow df cstate");
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+               dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+       ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+       if (amdgpu_ras_intr_triggered())
+               return ret;
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+               dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to allow df cstate");
+
+       return ret;
+}
+
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
@@ -788,20 +870,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                break;
        case AMDGPU_RAS_BLOCK__UMC:
        case AMDGPU_RAS_BLOCK__MMHUB:
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                ret = psp_ras_trigger_error(&adev->psp, &block_info);
                break;
+       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+               ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+               break;
        default:
-               DRM_INFO("%s error injection is not supported yet\n",
+               dev_info(adev->dev, "%s error injection is not supported yet\n",
                         ras_block_str(info->head.block));
                ret = -EINVAL;
        }
 
-       if (ret)
-               DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
-                               ras_block_str(info->head.block),
-                               ret);
+       amdgpu_ras_parse_status_code(adev,
+                                    "inject",
+                                    ras_block_str(info->head.block),
+                                    (enum ta_ras_status)ret);
 
        return ret;
 }
@@ -1430,9 +1514,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
 
        /* Build list of devices to query RAS related errors */
-       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1)
                device_list_handle = &hive->device_list;
-       } else {
+       else {
+               INIT_LIST_HEAD(&device_list);
                list_add_tail(&adev->gmc.xgmi.head, &device_list);
                device_list_handle = &device_list;
        }
@@ -1535,7 +1620,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
                                                        &data->bps[control->num_recs],
                                                        true,
                                                        save_count)) {
-                       DRM_ERROR("Failed to save EEPROM table data!");
+                       dev_err(adev->dev, "Failed to save EEPROM table data!");
                        return -EIO;
                }
 
@@ -1563,7 +1648,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 
        if (amdgpu_ras_eeprom_process_recods(control, bps, false,
                control->num_recs)) {
-               DRM_ERROR("Failed to load EEPROM table records!");
+               dev_err(adev->dev, "Failed to load EEPROM table records!");
                ret = -EIO;
                goto out;
        }
@@ -1637,7 +1722,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
                                               AMDGPU_GPU_PAGE_SIZE,
                                               AMDGPU_GEM_DOMAIN_VRAM,
                                               &bo, NULL))
-                       DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+                       dev_warn(adev->dev, "RAS WARN: reserve vram for "
+                                       "retired page %llx fail\n", bp);
 
                data->bps_bo[i] = bo;
                data->last_reserved = i + 1;
@@ -1725,7 +1811,7 @@ free:
        kfree(*data);
        con->eh_data = NULL;
 out:
-       DRM_WARN("Failed to initialize ras recovery!\n");
+       dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
 
        return ret;
 }
@@ -1787,18 +1873,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
                return;
 
        if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
-               DRM_INFO("HBM ECC is active.\n");
+               dev_info(adev->dev, "HBM ECC is active.\n");
                *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
                                1 << AMDGPU_RAS_BLOCK__DF);
        } else
-               DRM_INFO("HBM ECC is not presented.\n");
+               dev_info(adev->dev, "HBM ECC is not presented.\n");
 
        if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
-               DRM_INFO("SRAM ECC is active.\n");
+               dev_info(adev->dev, "SRAM ECC is active.\n");
                *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                                1 << AMDGPU_RAS_BLOCK__DF);
        } else
-               DRM_INFO("SRAM ECC is not presented.\n");
+               dev_info(adev->dev, "SRAM ECC is not presented.\n");
 
        /* hw_supported needs to be aligned with RAS block mask. */
        *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1855,7 +1941,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        if (amdgpu_ras_fs_init(adev))
                goto fs_out;
 
-       DRM_INFO("RAS INFO: ras initialized successfully, "
+       dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                        "hardware ability[%x] ras_mask[%x]\n",
                        con->hw_supported, con->supported);
        return 0;
@@ -2037,7 +2123,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
                return;
 
        if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
-               DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+               dev_info(adev->dev, "uncorrectable hardware error"
+                       "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
 
                amdgpu_ras_reset_gpu(adev);
        }
index 55c3eceb390d4f4656c62d8c0af064564e4c1587..e7df5d8429f825946245c9fc9fc81cb338fc8bbc 100644 (file)
@@ -334,6 +334,8 @@ struct amdgpu_ras {
        uint32_t flags;
        bool reboot;
        struct amdgpu_ras_eeprom_control eeprom_control;
+
+       bool error_query_ready;
 };
 
 struct ras_fs_data {
@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
 
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
 
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
 #endif
index a7e1d0425ed08b74d640a8fd8a9bd4820d7ec30e..13ea8ebc421c6e47bad20b525ab525883c3d852f 100644 (file)
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned max_dw, struct amdgpu_irq_src *irq_src,
-                    unsigned irq_type)
+                    unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int hw_prio)
 {
        int r, i;
        int sched_hw_submission = amdgpu_sched_hw_submission;
+       u32 *num_sched;
+       u32 hw_ip;
 
        /* Set the hw submission limit higher for KIQ because
         * it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        ring->priority = DRM_SCHED_PRIORITY_NORMAL;
        mutex_init(&ring->priority_mutex);
 
+       if (!ring->no_scheduler) {
+               hw_ip = ring->funcs->type;
+               num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+               adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+                       &ring->sched;
+       }
+
        for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
                atomic_set(&ring->num_jobs[i], 0);
 
index 9a443013d70d5d4a94099206eb84ecb1662f7b6a..be218754629ab85b02000c9ea6d379ab44ad8148 100644 (file)
 
 /* max number of rings */
 #define AMDGPU_MAX_RINGS               28
+#define AMDGPU_MAX_HWIP_RINGS          8
 #define AMDGPU_MAX_GFX_RINGS           2
 #define AMDGPU_MAX_COMPUTE_RINGS       8
 #define AMDGPU_MAX_VCE_RINGS           3
 #define AMDGPU_MAX_UVD_ENC_RINGS       2
 
+#define AMDGPU_RING_PRIO_DEFAULT       1
+#define AMDGPU_RING_PRIO_MAX           AMDGPU_GFX_PIPE_PRIO_MAX
+
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED   ((void *)0ul)
 #define AMDGPU_FENCE_OWNER_VM          ((void *)1ul)
 
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
+#define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
+
 enum amdgpu_ring_type {
-       AMDGPU_RING_TYPE_GFX,
-       AMDGPU_RING_TYPE_COMPUTE,
-       AMDGPU_RING_TYPE_SDMA,
-       AMDGPU_RING_TYPE_UVD,
-       AMDGPU_RING_TYPE_VCE,
-       AMDGPU_RING_TYPE_KIQ,
-       AMDGPU_RING_TYPE_UVD_ENC,
-       AMDGPU_RING_TYPE_VCN_DEC,
-       AMDGPU_RING_TYPE_VCN_ENC,
-       AMDGPU_RING_TYPE_VCN_JPEG
+       AMDGPU_RING_TYPE_GFX            = AMDGPU_HW_IP_GFX,
+       AMDGPU_RING_TYPE_COMPUTE        = AMDGPU_HW_IP_COMPUTE,
+       AMDGPU_RING_TYPE_SDMA           = AMDGPU_HW_IP_DMA,
+       AMDGPU_RING_TYPE_UVD            = AMDGPU_HW_IP_UVD,
+       AMDGPU_RING_TYPE_VCE            = AMDGPU_HW_IP_VCE,
+       AMDGPU_RING_TYPE_UVD_ENC        = AMDGPU_HW_IP_UVD_ENC,
+       AMDGPU_RING_TYPE_VCN_DEC        = AMDGPU_HW_IP_VCN_DEC,
+       AMDGPU_RING_TYPE_VCN_ENC        = AMDGPU_HW_IP_VCN_ENC,
+       AMDGPU_RING_TYPE_VCN_JPEG       = AMDGPU_HW_IP_VCN_JPEG,
+       AMDGPU_RING_TYPE_KIQ
+};
+
+enum amdgpu_ib_pool_type {
+       /* Normal submissions to the top of the pipeline. */
+       AMDGPU_IB_POOL_DELAYED,
+       /* Immediate submissions to the bottom of the pipeline. */
+       AMDGPU_IB_POOL_IMMEDIATE,
+       /* Direct submission to the ring buffer during init and reset. */
+       AMDGPU_IB_POOL_DIRECT,
+
+       AMDGPU_IB_POOL_MAX
 };
 
 struct amdgpu_device;
@@ -65,6 +82,11 @@ struct amdgpu_ib;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 
+struct amdgpu_sched {
+       u32                             num_scheds;
+       struct drm_gpu_scheduler        *sched[AMDGPU_MAX_HWIP_RINGS];
+};
+
 /*
  * Fences.
  */
@@ -96,7 +118,8 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
                      unsigned flags);
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+                             uint32_t timeout);
 bool amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
@@ -159,17 +182,20 @@ struct amdgpu_ring_funcs {
        void (*end_use)(struct amdgpu_ring *ring);
        void (*emit_switch_buffer) (struct amdgpu_ring *ring);
        void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-       void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+       void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+                         uint32_t reg_val_offs);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
                              uint32_t val, uint32_t mask);
        void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask);
-       void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+                               bool secure);
        /* Try to soft recover the ring to make the fence signal */
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
        int (*preempt_ib)(struct amdgpu_ring *ring);
+       void (*emit_mem_sync)(struct amdgpu_ring *ring);
 };
 
 struct amdgpu_ring {
@@ -214,12 +240,12 @@ struct amdgpu_ring {
        unsigned                vm_inv_eng;
        struct dma_fence        *vmid_wait;
        bool                    has_compute_vm_bug;
+       bool                    no_scheduler;
 
        atomic_t                num_jobs[DRM_SCHED_PRIORITY_MAX];
        struct mutex            priority_mutex;
        /* protected by priority_mutex */
        int                     priority;
-       bool                    has_high_prio;
 
 #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
@@ -241,11 +267,11 @@ struct amdgpu_ring {
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -257,8 +283,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned irq_type);
+                    unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int prio);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t val0,
index 4b352206354b884f5d6cae71d5a26af1008b5edc..e5b8fb8e75c5250427761a0466e66f65ea138aa6 100644 (file)
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
 
 struct amdgpu_sdma {
        struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-       struct drm_gpu_scheduler    *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
-       uint32_t                    num_sdma_sched;
        struct amdgpu_irq_src   trap_irq;
        struct amdgpu_irq_src   illegal_inst_irq;
        struct amdgpu_irq_src   ecc_irq;
@@ -91,7 +89,8 @@ struct amdgpu_buffer_funcs {
                                 /* dst addr in bytes */
                                 uint64_t dst_offset,
                                 /* number of byte to transfer */
-                                uint32_t byte_count);
+                                uint32_t byte_count,
+                                bool tmz);
 
        /* maximum bytes in a single operation */
        uint32_t        fill_max_bytes;
@@ -109,7 +108,7 @@ struct amdgpu_buffer_funcs {
                                 uint32_t byte_count);
 };
 
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b), (t))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 
 struct amdgpu_sdma_instance *
index b8639225369611a2fbdfe899bc94ef9f88af8a4a..b87ca171986af0879394d0f09634f641b006d26f 100644 (file)
@@ -249,6 +249,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;
 
+               /* Never sync to VM updates either. */
+               if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+                   owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+                       continue;
+
                /* Ignore fences depending on the sync mode */
                switch (mode) {
                case AMDGPU_SYNC_ALWAYS:
index b158230af8db705cfee5d012fb60738bc0cb69e5..2f4d5ca9894fc361ee72bd5b168fc89102ceac42 100644 (file)
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
@@ -124,7 +124,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(gtt_obj[i]);
 
                r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
-                                      size, NULL, &fence, false, false);
+                                      size, NULL, &fence, false, false, false);
 
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -170,7 +170,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(vram_obj);
 
                r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
-                                      size, NULL, &fence, false, false);
+                                      size, NULL, &fence, false, false, false);
 
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
index 63e734a125fb60fb4efe2dc7f5bcc769825fc022..5da20fc166d98e2cb92c644d11d2d55d08b8563f 100644 (file)
@@ -35,7 +35,7 @@
 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
         job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
 
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
                      (unsigned long)__entry->value)
 );
 
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
index b1c62da527c59bf742e75d2a1b4a28b731941ae8..9cbecd5ba814be0bc5a499dd1bcb54d16374bdd0 100644 (file)
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ    (size_t)128
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr);
 
 /**
  * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -277,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
  *
  */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
-                                              unsigned long *offset)
+                                              uint64_t *offset)
 {
        struct drm_mm_node *mm_node = mem->mm_node;
 
@@ -288,92 +283,192 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
        return mm_node;
 }
 
+/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should setup a TMZ enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Setup one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+                                struct ttm_mem_reg *mem,
+                                struct drm_mm_node *mm_node,
+                                unsigned num_pages, uint64_t offset,
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *addr)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       unsigned num_dw, num_bytes;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       void *cpu_addr;
+       uint64_t flags;
+       unsigned int i;
+       int r;
+
+       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+       /* Map only what can't be accessed directly */
+       if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+               *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+               return 0;
+       }
+
+       *addr = adev->gmc.gart_start;
+       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+               AMDGPU_GPU_PAGE_SIZE;
+       *addr += offset & ~PAGE_MASK;
+
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       num_bytes = num_pages * 8;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                    AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               return r;
+
+       src_addr = num_dw * 4;
+       src_addr += job->ibs[0].gpu_addr;
+
+       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+                               dst_addr, num_bytes, false);
+
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+
+       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+       if (tmz)
+               flags |= AMDGPU_PTE_TMZ;
+
+       cpu_addr = &job->ibs[0].ptr[num_dw];
+
+       if (mem->mem_type == TTM_PL_TT) {
+               struct ttm_dma_tt *dma;
+               dma_addr_t *dma_address;
+
+               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+                                   cpu_addr);
+               if (r)
+                       goto error_free;
+       } else {
+               dma_addr_t dma_address;
+
+               dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+               dma_address += adev->vm_manager.vram_base_offset;
+
+               for (i = 0; i < num_pages; ++i) {
+                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+                                           &dma_address, flags, cpu_addr);
+                       if (r)
+                               goto error_free;
+
+                       dma_address += PAGE_SIZE;
+               }
+       }
+
+       r = amdgpu_job_submit(job, &adev->mman.entity,
+                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r)
+               goto error_free;
+
+       dma_fence_put(fence);
+
+       return r;
+
+error_free:
+       amdgpu_job_free(job);
+       return r;
+}
+
 /**
  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
  *
  * The function copies @size bytes from {src->mem + src->offset} to
  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
  * move and different for a BO to BO copy.
  *
- * @f: Returns the last fence if multiple jobs are submitted.
  */
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f)
 {
+       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+                                       AMDGPU_GPU_PAGE_SIZE);
+
+       uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct drm_mm_node *src_mm, *dst_mm;
-       uint64_t src_node_start, dst_node_start, src_node_size,
-                dst_node_size, src_page_offset, dst_page_offset;
        struct dma_fence *fence = NULL;
        int r = 0;
-       const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
 
        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
-       src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
-       src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
-                                            src->offset;
-       src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
-       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+       src_offset = src->offset;
+       src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+       src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
 
-       dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
-       dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
-                                            dst->offset;
-       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
-       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+       dst_offset = dst->offset;
+       dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
        while (size) {
-               unsigned long cur_size;
-               uint64_t from = src_node_start, to = dst_node_start;
+               uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+               uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
                struct dma_fence *next;
+               uint32_t cur_size;
+               uint64_t from, to;
 
                /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
                 * begins at an offset, then adjust the size accordingly
                 */
-               cur_size = min3(min(src_node_size, dst_node_size), size,
-                               GTT_MAX_BYTES);
-               if (cur_size + src_page_offset > GTT_MAX_BYTES ||
-                   cur_size + dst_page_offset > GTT_MAX_BYTES)
-                       cur_size -= max(src_page_offset, dst_page_offset);
-
-               /* Map only what needs to be accessed. Map src to window 0 and
-                * dst to window 1
-                */
-               if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(src->bo, src->mem,
-                                       PFN_UP(cur_size + src_page_offset),
-                                       src_node_start, 0, ring,
-                                       &from);
-                       if (r)
-                               goto error;
-                       /* Adjust the offset because amdgpu_map_buffer returns
-                        * start of mapped page
-                        */
-                       from += src_page_offset;
-               }
+               cur_size = max(src_page_offset, dst_page_offset);
+               cur_size = min(min3(src_node_size, dst_node_size, size),
+                              (uint64_t)(GTT_MAX_BYTES - cur_size));
+
+               /* Map src to window 0 and dst to window 1. */
+               r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+                                         PFN_UP(cur_size + src_page_offset),
+                                         src_offset, 0, ring, tmz, &from);
+               if (r)
+                       goto error;
 
-               if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(dst->bo, dst->mem,
-                                       PFN_UP(cur_size + dst_page_offset),
-                                       dst_node_start, 1, ring,
-                                       &to);
-                       if (r)
-                               goto error;
-                       to += dst_page_offset;
-               }
+               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+                                         PFN_UP(cur_size + dst_page_offset),
+                                         dst_offset, 1, ring, tmz, &to);
+               if (r)
+                       goto error;
 
                r = amdgpu_copy_buffer(ring, from, to, cur_size,
-                                      resv, &next, false, true);
+                                      resv, &next, false, true, tmz);
                if (r)
                        goto error;
 
@@ -386,23 +481,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 
                src_node_size -= cur_size;
                if (!src_node_size) {
-                       src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
-                                                            src->mem);
-                       src_node_size = (src_mm->size << PAGE_SHIFT);
-                       src_page_offset = 0;
+                       ++src_mm;
+                       src_node_size = src_mm->size << PAGE_SHIFT;
+                       src_offset = 0;
                } else {
-                       src_node_start += cur_size;
-                       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+                       src_offset += cur_size;
                }
+
                dst_node_size -= cur_size;
                if (!dst_node_size) {
-                       dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
-                                                            dst->mem);
-                       dst_node_size = (dst_mm->size << PAGE_SHIFT);
-                       dst_page_offset = 0;
+                       ++dst_mm;
+                       dst_node_size = dst_mm->size << PAGE_SHIFT;
+                       dst_offset = 0;
                } else {
-                       dst_node_start += cur_size;
-                       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+                       dst_offset += cur_size;
                }
        }
 error:
@@ -425,6 +517,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *old_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
@@ -438,14 +531,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
                                       new_mem->num_pages << PAGE_SHIFT,
+                                      amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
        if (r)
                goto error;
 
        /* clear the space being freed */
        if (old_mem->mem_type == TTM_PL_VRAM &&
-           (ttm_to_amdgpu_bo(bo)->flags &
-            AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+           (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
                r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -742,8 +835,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
 {
+       uint64_t offset = (page_offset << PAGE_SHIFT);
        struct drm_mm_node *mm;
-       unsigned long offset = (page_offset << PAGE_SHIFT);
 
        mm = amdgpu_find_mm_node(&bo->mem, &offset);
        return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -1007,6 +1100,9 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
 
+       if (amdgpu_bo_encrypted(abo))
+               flags |= AMDGPU_PTE_TMZ;
+
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
@@ -1519,6 +1615,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
        switch (bo->mem.mem_type) {
        case TTM_PL_TT:
+               if (amdgpu_bo_is_amdgpu_bo(bo) &&
+                   amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+                       return false;
                return true;
 
        case TTM_PL_VRAM:
@@ -1567,8 +1666,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
 
-       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
-       pos = (nodes->start << PAGE_SHIFT) + offset;
+       pos = offset;
+       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+       pos += (nodes->start << PAGE_SHIFT);
 
        while (len && pos < adev->gmc.mc_vram_size) {
                uint64_t aligned_pos = pos & ~(uint64_t)3;
@@ -1837,17 +1937,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                return r;
 
        /*
-        * reserve one TMR (64K) memory at the top of VRAM which holds
+        * reserve TMR memory at the top of VRAM which holds
         * IP Discovery data and is protected by PSP.
         */
-       r = amdgpu_bo_create_kernel_at(adev,
-                                      adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
-                                      DISCOVERY_TMR_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->discovery_memory,
-                                      NULL);
-       if (r)
-               return r;
+       if (adev->discovery_tmr_size > 0) {
+               r = amdgpu_bo_create_kernel_at(adev,
+                       adev->gmc.real_vram_size - adev->discovery_tmr_size,
+                       adev->discovery_tmr_size,
+                       AMDGPU_GEM_DOMAIN_VRAM,
+                       &adev->discovery_memory,
+                       NULL);
+               if (r)
+                       return r;
+       }
 
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
                 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1995,75 +2097,14 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr)
-{
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-       struct amdgpu_device *adev = ring->adev;
-       struct ttm_tt *ttm = bo->ttm;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       dma_addr_t *dma_address;
-       struct dma_fence *fence;
-       uint64_t src_addr, dst_addr;
-       uint64_t flags;
-       int r;
-
-       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
-              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
-       *addr = adev->gmc.gart_start;
-       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
-               AMDGPU_GPU_PAGE_SIZE;
-
-       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       num_bytes = num_pages * 8;
-
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
-       if (r)
-               return r;
-
-       src_addr = num_dw * 4;
-       src_addr += job->ibs[0].gpu_addr;
-
-       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
-       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
-       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-                               dst_addr, num_bytes);
-
-       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-       WARN_ON(job->ibs[0].length_dw > num_dw);
-
-       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
-       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-                           &job->ibs[0].ptr[num_dw]);
-       if (r)
-               goto error_free;
-
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
-
-       dma_fence_put(fence);
-
-       return r;
-
-error_free:
-       amdgpu_job_free(job);
-       return r;
-}
-
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush)
+                      bool vm_needs_flush, bool tmz)
 {
+       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
 
@@ -2081,7 +2122,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
        if (r)
                return r;
 
@@ -2103,7 +2144,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
-                                       dst_offset, cur_size_in_bytes);
+                                       dst_offset, cur_size_in_bytes, tmz);
 
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
@@ -2170,7 +2211,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
        /* for IB padding */
        num_dw += 64;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                return r;
 
index bd05bbb4878d96a69ebc4b28d93c6d4040fe5d53..4351d02644a7bef01177d8e0d57bf9cb72cb56a1 100644 (file)
@@ -24,8 +24,9 @@
 #ifndef __AMDGPU_TTM_H__
 #define __AMDGPU_TTM_H__
 
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
 #include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
 
 #define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
 #define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
@@ -87,11 +97,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush);
+                      bool vm_needs_flush, bool tmz);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
index 9ef3124282318f37dffcf20c2ebf98ebf6561372..65bb25e31d450ba7c194eecf3fdd9a8832dc7d23 100644 (file)
@@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
 FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
 FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
index 9dd51f0d2c11be70b0c1b9cfd360fefde6a54300..af1b1ccf613c98589a266e609951ada186af7ddb 100644 (file)
@@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
                 * even NOMEM error is encountered
                 */
                if(!err_data->err_addr)
-                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
+                       dev_warn(adev->dev, "Failed to alloc memory for "
+                                       "umc error address record!\n");
 
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
@@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 
        /* only uncorrectable error needs gpu reset */
        if (err_data->ue_count) {
+               dev_info(adev->dev, "%ld uncorrectable hardware errors "
+                               "detected in UMC block\n",
+                               err_data->ue_count);
+
                if (err_data->err_addr_cnt &&
                    amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
                                                err_data->err_addr_cnt))
-                       DRM_WARN("Failed to add ras bad page!\n");
+                       dev_warn(adev->dev, "Failed to add ras bad page!\n");
 
                amdgpu_ras_reset_gpu(adev);
        }
index 5fd32ad1c5751e9b9ca0f11fda0c937ad0d798e3..5100ebe8858d442af14f4bf1ef46cdd30eb8c5cd 100644 (file)
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                        goto err;
        }
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                goto err;
 
index 59ddba137946bd5e5a345eab215fb5c823628d3c..ecaa2d7483b20d19883b16f01e892941dc78e2b2 100644 (file)
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                    AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -524,7 +525,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        struct dma_fence *f = NULL;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                    direct ? AMDGPU_IB_POOL_DIRECT :
+                                    AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;
 
index a41272fbcba23ab7f87915b71b9d7ddbc4b32c03..2badbc0355f204d1db7a1efb91766452b831e67a 100644 (file)
@@ -56,19 +56,23 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-       unsigned long bo_size;
+       unsigned long bo_size, fw_shared_bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int i, r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+       mutex_init(&adev->vcn.vcn_pg_lock);
+       atomic_set(&adev->vcn.total_submission_cnt, 0);
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+               atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 
        switch (adev->asic_type) {
        case CHIP_RAVEN:
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        fw_name = FIRMWARE_RAVEN2;
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
@@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                                return r;
                        }
                }
+
+               r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+                               &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+               if (r) {
+                       dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+                       return r;
+               }
+
+               fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
        }
 
        return 0;
@@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
+
+               kvfree(adev->vcn.inst[j].saved_shm_bo);
+               amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+                                         &adev->vcn.inst[j].fw_shared_gpu_addr,
+                                         (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        }
 
        release_firmware(adev->vcn.fw);
+       mutex_destroy(&adev->vcn.vcn_pg_lock);
 
        return 0;
 }
@@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
                        return -ENOMEM;
 
                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+               if (adev->vcn.inst[i].fw_shared_bo == NULL)
+                       return 0;
+
+               if (!adev->vcn.inst[i].saved_shm_bo)
+                       return -ENOMEM;
+
+               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+               memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
        }
        return 0;
 }
@@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
                        }
                        memset_io(ptr, 0, size);
                }
+
+               if (adev->vcn.inst[i].fw_shared_bo == NULL)
+                       return -EINVAL;
+
+               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+               if (adev->vcn.inst[i].saved_shm_bo != NULL)
+                       memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+               else
+                       memset_io(ptr, 0, size);
        }
        return 0;
 }
@@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                        struct dpg_pause_state new_state;
 
-                       if (fence[j])
+                       if (fence[j] ||
+                               unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                fences += fence[j];
        }
 
-       if (fences == 0) {
-               amdgpu_gfx_off_ctrl(adev, true);
+       if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                       AMD_PG_STATE_GATE);
        } else {
@@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-       if (set_clocks) {
-               amdgpu_gfx_off_ctrl(adev, false);
-               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-                      AMD_PG_STATE_UNGATE);
-       }
+       atomic_inc(&adev->vcn.total_submission_cnt);
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+       mutex_lock(&adev->vcn.vcn_pg_lock);
+       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+              AMD_PG_STATE_UNGATE);
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                struct dpg_pause_state new_state;
-               unsigned int fences = 0;
-               unsigned int i;
 
-               for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
-               }
-               if (fences)
+               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+                       atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
-               else
-                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               } else {
+                       unsigned int fences = 0;
+                       unsigned int i;
 
-               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
-                       new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+                               fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+                       if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+                               new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       else
+                               new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               }
 
                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
+       mutex_unlock(&adev->vcn.vcn_pg_lock);
 }
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+       if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+               ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+               atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+       atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 }
 
@@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;
 
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
index 6fe057329de2b1237887dbbd3946cba653833806..90aa12b2272519f8419f3930b8c4691239fece9f 100644 (file)
                }                                                                               \
        } while (0)
 
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
+
+enum fw_queue_mode {
+       FW_QUEUE_RING_RESET = 1,
+       FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
 enum engine_status_constants {
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
+       struct amdgpu_bo        *fw_shared_bo;
        struct dpg_pause_state  pause_state;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
        uint32_t                *dpg_sram_curr_addr;
+       atomic_t                dpg_enc_submission_cnt;
+       void                    *fw_shared_cpu_addr;
+       uint64_t                fw_shared_gpu_addr;
+       void                    *saved_shm_bo;
 };
 
 struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
        uint8_t num_vcn_inst;
        struct amdgpu_vcn_inst   inst[AMDGPU_MAX_VCN_INSTANCES];
        struct amdgpu_vcn_reg    internal;
-       struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
-       struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
-       uint32_t                 num_vcn_enc_sched;
-       uint32_t                 num_vcn_dec_sched;
+       struct mutex             vcn_pg_lock;
+       atomic_t                 total_submission_cnt;
 
        unsigned        harvest_config;
        int (*pause_dpg_mode)(struct amdgpu_device *adev,
                int inst_idx, struct dpg_pause_state *new_state);
 };
 
+struct amdgpu_fw_shared_multi_queue {
+       uint8_t decode_queue_mode;
+       uint8_t encode_generalpurpose_queue_mode;
+       uint8_t encode_lowlatency_queue_mode;
+       uint8_t encode_realtime_queue_mode;
+       uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+       uint32_t present_flag_0;
+       uint8_t pad[53];
+       struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vcn_suspend(struct amdgpu_device *adev);
index adc813cde8e281617118725a544483bc57a64d21..f3b38c9e04ca09d86db6c6a514c4508a18c73308 100644 (file)
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
        /* enable virtual display */
-       adev->mode_info.num_crtc = 1;
+       if (adev->mode_info.num_crtc == 0)
+               adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
@@ -59,7 +60,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
-       amdgpu_fence_emit_polling(ring, &seq);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -81,6 +85,9 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 
        return;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq:
        pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
 }
@@ -152,6 +159,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
        return 0;
 }
 
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+
+       if (virt->ops && virt->ops->req_init_data)
+               virt->ops->req_init_data(adev);
+
+       if (adev->virt.req_init_data_ver > 0)
+               DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+       else
+               DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
 /**
  * amdgpu_virt_wait_reset() - wait for reset gpu completed
  * @amdgpu:    amdgpu device.
@@ -287,3 +307,82 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                }
        }
 }
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+       uint32_t reg;
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+               break;
+       case CHIP_VEGA10:
+       case CHIP_VEGA20:
+       case CHIP_NAVI10:
+       case CHIP_NAVI12:
+       case CHIP_ARCTURUS:
+               reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+               break;
+       default: /* other chip doesn't support SRIOV */
+               reg = 0;
+               break;
+       }
+
+       if (reg & 1)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+       if (reg & 0x80000000)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+       if (!reg) {
+               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
+                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+       }
+}
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+       return amdgpu_sriov_is_debug(adev) ? true : false;
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+       return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+       if (!amdgpu_sriov_vf(adev) ||
+           amdgpu_virt_access_debugfs_is_kiq(adev))
+               return 0;
+
+       if (amdgpu_virt_access_debugfs_is_mmio(adev))
+               adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+       else
+               return -EPERM;
+
+       return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+       if (amdgpu_sriov_vf(adev))
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+       enum amdgpu_sriov_vf_mode mode;
+
+       if (amdgpu_sriov_vf(adev)) {
+               if (amdgpu_sriov_is_pp_one_vf(adev))
+                       mode = SRIOV_VF_MODE_ONE_VF;
+               else
+                       mode = SRIOV_VF_MODE_MULTI_VF;
+       } else {
+               mode = SRIOV_VF_MODE_BARE_METAL;
+       }
+
+       return mode;
+}
index f0128f745bd2847c2b7aaee89a3ee7a12a4a2271..b90e822cebd767684df71b9a74788879e9cb4146 100644 (file)
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+       SRIOV_VF_MODE_BARE_METAL = 0,
+       SRIOV_VF_MODE_ONE_VF,
+       SRIOV_VF_MODE_MULTI_VF,
+};
+
 struct amdgpu_mm_table {
        struct amdgpu_bo        *bo;
        uint32_t                *cpu_addr;
@@ -54,6 +65,7 @@ struct amdgpu_vf_error_buffer {
 struct amdgpu_virt_ops {
        int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
        int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+       int (*req_init_data)(struct amdgpu_device *adev);
        int (*reset_gpu)(struct amdgpu_device *adev);
        int (*wait_reset)(struct amdgpu_device *adev);
        void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +95,8 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
        /* VRAM LOST by GIM */
        AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+       /* MM bandwidth */
+       AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
        /* PP ONE VF MODE in GIM */
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
@@ -256,6 +270,8 @@ struct amdgpu_virt {
        struct amdgpu_virt_fw_reserve   fw_reserve;
        uint32_t gim_feature;
        uint32_t reg_access_mode;
+       int req_init_data_ver;
+       bool tdr_debug;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -287,6 +303,10 @@ static inline bool is_virtual_machine(void)
 
 #define amdgpu_sriov_is_pp_one_vf(adev) \
        ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+       ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+       ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
 
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +316,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +324,11 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
 #endif
index 6d9252a27916d1ee5f887846e79427cd3ed9b9a2..7417754e9141b02fde4b3963efb7ce96132f97b3 100644 (file)
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
        struct dma_fence_cb cb;
 };
 
-/**
+/*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
  * an MMU notifier runs in reclaim-FS context.
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
  * @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
  *
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo,
-                             bool direct)
+                             bool immediate)
 {
        struct ttm_operation_ctx ctx = { true, false };
        unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
 
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @vm: requesting vm
  * @level: the page table level
- * @direct: use a direct update
  * @immediate: use an immediate update
  * @bp: resulting BO allocation parameters
  */
 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                              int level, bool direct,
+                              int level, bool immediate,
                               struct amdgpu_bo_param *bp)
 {
        memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
                bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
        bp->type = ttm_bo_type_kernel;
-       bp->no_wait_gpu = direct;
+       bp->no_wait_gpu = immediate;
        if (vm->root.base.bo)
                bp->resv = vm->root.base.bo->tbo.base.resv;
 }
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @adev: amdgpu_device pointer
  * @vm: VM to allocate page tables for
  * @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Make sure a specific page table or directory is allocated.
  *
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *cursor,
-                              bool direct)
+                              bool immediate)
 {
        struct amdgpu_vm_pt *entry = cursor->entry;
        struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        if (entry->base.bo)
                return 0;
 
-       amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+       amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
 
        r = amdgpu_bo_create(adev, &bp, &pt);
        if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 
-       r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+       r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
        if (r)
                goto error_free_pt;
 
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
  *
  * Makes sure all directories are up to date.
  *
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  * 0 for success, error for failure.
  */
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                         struct amdgpu_vm *vm, bool direct)
+                         struct amdgpu_vm *vm, bool immediate)
 {
        struct amdgpu_vm_update_params params;
        int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
 
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
@@ -1446,20 +1446,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                uint64_t incr, entry_end, pe_start;
                struct amdgpu_bo *pt;
 
-               if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+               if (!params->unlocked) {
                        /* make sure that the page tables covering the
                         * address range are actually allocated
                         */
                        r = amdgpu_vm_alloc_pts(params->adev, params->vm,
-                                               &cursor, params->direct);
+                                               &cursor, params->immediate);
                        if (r)
                                return r;
                }
 
                shift = amdgpu_vm_level_shift(adev, cursor.level);
                parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
-               if (adev->asic_type < CHIP_VEGA10 &&
-                   (flags & AMDGPU_PTE_VALID)) {
+               if (params->unlocked) {
+                       /* Unlocked updates are only allowed on the leaves */
+                       if (amdgpu_vm_pt_descendant(adev, &cursor))
+                               continue;
+               } else if (adev->asic_type < CHIP_VEGA10 &&
+                          (flags & AMDGPU_PTE_VALID)) {
                        /* No huge page support before GMC v9 */
                        if (cursor.level != AMDGPU_VM_PTB) {
                                if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1557,7 +1561,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
  * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
@@ -1572,8 +1577,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-                                      struct amdgpu_vm *vm, bool direct,
-                                      struct dma_resv *resv,
+                                      struct amdgpu_vm *vm, bool immediate,
+                                      bool unlocked, struct dma_resv *resv,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       dma_addr_t *pages_addr,
@@ -1586,8 +1591,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.direct = direct;
+       params.immediate = immediate;
        params.pages_addr = pages_addr;
+       params.unlocked = unlocked;
 
        /* Implicitly sync to command submissions in the same VM before
         * unmapping. Sync to moving fences before mapping.
@@ -1603,11 +1609,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                goto error_unlock;
        }
 
-       if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
-               struct amdgpu_bo *root = vm->root.base.bo;
+       if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+               struct dma_fence *tmp = dma_fence_get_stub();
 
-               if (!dma_fence_is_signaled(vm->last_direct))
-                       amdgpu_bo_fence(root, vm->last_direct, true);
+               amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+               swap(vm->last_unlocked, tmp);
+               dma_fence_put(tmp);
        }
 
        r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -1721,7 +1728,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                }
 
                last = min((uint64_t)mapping->last, start + max_entries - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
                                                start, last, flags, addr,
                                                dma_addr, fence);
                if (r)
@@ -1784,6 +1791,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 
        if (bo) {
                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+               if (amdgpu_bo_encrypted(bo))
+                       flags |= AMDGPU_PTE_TMZ;
+
                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        } else {
                flags = 0x0;
@@ -2014,7 +2025,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                    mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
                                                mapping->start, mapping->last,
                                                init_pte_value, 0, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2124,11 +2135,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
            (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
                bo_va->is_xgmi = true;
-               mutex_lock(&adev->vm_manager.lock_pstate);
                /* Power up XGMI if it can be potentially used */
-               if (++adev->vm_manager.xgmi_map_counter == 1)
-                       amdgpu_xgmi_set_pstate(adev, 1);
-               mutex_unlock(&adev->vm_manager.lock_pstate);
+               amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
        }
 
        return bo_va;
@@ -2551,12 +2559,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
        dma_fence_put(bo_va->last_pt_update);
 
-       if (bo && bo_va->is_xgmi) {
-               mutex_lock(&adev->vm_manager.lock_pstate);
-               if (--adev->vm_manager.xgmi_map_counter == 0)
-                       amdgpu_xgmi_set_pstate(adev, 0);
-               mutex_unlock(&adev->vm_manager.lock_pstate);
-       }
+       if (bo && bo_va->is_xgmi)
+               amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
 
        kfree(bo_va);
 }
@@ -2585,7 +2589,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return false;
 
        /* Don't evict VM page tables while they are updated */
-       if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+       if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
                amdgpu_vm_eviction_unlock(bo_base->vm);
                return false;
        }
@@ -2762,7 +2766,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
        if (timeout <= 0)
                return timeout;
 
-       return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+       return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
 }
 
 /**
@@ -2798,7 +2802,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 
        /* create scheduler entities for page table updates */
-       r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+       r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
@@ -2808,7 +2812,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                  adev->vm_manager.vm_pte_scheds,
                                  adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
-               goto error_free_direct;
+               goto error_free_immediate;
 
        vm->pte_support_ats = false;
        vm->is_compute_context = false;
@@ -2834,7 +2838,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        else
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        vm->last_update = NULL;
-       vm->last_direct = dma_fence_get_stub();
+       vm->last_unlocked = dma_fence_get_stub();
 
        mutex_init(&vm->eviction_lock);
        vm->evicting = false;
@@ -2888,11 +2892,11 @@ error_free_root:
        vm->root.base.bo = NULL;
 
 error_free_delayed:
-       dma_fence_put(vm->last_direct);
+       dma_fence_put(vm->last_unlocked);
        drm_sched_entity_destroy(&vm->delayed);
 
-error_free_direct:
-       drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+       drm_sched_entity_destroy(&vm->immediate);
 
        return r;
 }
@@ -2996,10 +3000,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");
 
-       if (vm->use_cpu_for_update)
+       if (vm->use_cpu_for_update) {
+               /* Sync with last SDMA update/clear before switching to CPU */
+               r = amdgpu_bo_sync_wait(vm->root.base.bo,
+                                       AMDGPU_FENCE_OWNER_UNDEFINED, true);
+               if (r)
+                       goto free_idr;
+
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
-       else
+       } else {
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
+       }
        dma_fence_put(vm->last_update);
        vm->last_update = NULL;
        vm->is_compute_context = true;
@@ -3089,8 +3100,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                vm->pasid = 0;
        }
 
-       dma_fence_wait(vm->last_direct, false);
-       dma_fence_put(vm->last_direct);
+       dma_fence_wait(vm->last_unlocked, false);
+       dma_fence_put(vm->last_unlocked);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3107,7 +3118,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        amdgpu_bo_unref(&root);
        WARN_ON(vm->root.base.bo);
 
-       drm_sched_entity_destroy(&vm->direct);
+       drm_sched_entity_destroy(&vm->immediate);
        drm_sched_entity_destroy(&vm->delayed);
 
        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3166,9 +3177,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 
        idr_init(&adev->vm_manager.pasid_idr);
        spin_lock_init(&adev->vm_manager.pasid_lock);
-
-       adev->vm_manager.xgmi_map_counter = 0;
-       mutex_init(&adev->vm_manager.lock_pstate);
 }
 
 /**
@@ -3343,8 +3351,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
                value = 0;
        }
 
-       r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
-                                       flags, value, NULL, NULL);
+       r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
+                                       addr + 1, flags, value, NULL, NULL);
        if (r)
                goto error_unlock;
 
index 06fe30e1492d69e66d15233af1fa168be406c8b2..c8e68d7890bff92c3efdcb7a9d04e340bc950c6e 100644 (file)
@@ -54,6 +54,9 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_PTE_SYSTEM      (1ULL << 1)
 #define AMDGPU_PTE_SNOOPED     (1ULL << 2)
 
+/* RV+ */
+#define AMDGPU_PTE_TMZ         (1ULL << 3)
+
 /* VI only */
 #define AMDGPU_PTE_EXECUTABLE  (1ULL << 4)
 
@@ -203,9 +206,14 @@ struct amdgpu_vm_update_params {
        struct amdgpu_vm *vm;
 
        /**
-        * @direct: if changes should be made directly
+        * @immediate: if changes should be made immediately
         */
-       bool direct;
+       bool immediate;
+
+       /**
+        * @unlocked: true if the root BO is not locked
+        */
+       bool unlocked;
 
        /**
         * @pages_addr:
@@ -271,11 +279,11 @@ struct amdgpu_vm {
        struct dma_fence        *last_update;
 
        /* Scheduler entities for page table updates */
-       struct drm_sched_entity direct;
+       struct drm_sched_entity immediate;
        struct drm_sched_entity delayed;
 
-       /* Last submission to the scheduler entities */
-       struct dma_fence        *last_direct;
+       /* Last unlocked submission to the scheduler entities */
+       struct dma_fence        *last_unlocked;
 
        unsigned int            pasid;
        /* dedicated to vm */
@@ -349,10 +357,6 @@ struct amdgpu_vm_manager {
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;
-
-       /* counter of mapped memory through xgmi */
-       uint32_t                                xgmi_map_counter;
-       struct mutex                            lock_pstate;
 };
 
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -380,7 +384,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                         struct amdgpu_vm *vm, bool direct);
+                         struct amdgpu_vm *vm, bool immediate);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
index e3851630407097ad74fdd38d58bc4b8806474ce1..39c704a1fb0e556d832ff42f748a692f42e71364 100644 (file)
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
        pe += (unsigned long)amdgpu_bo_kptr(bo);
 
-       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
        for (i = 0; i < count; i++) {
                value = p->pages_addr ?
index cf96c335b258b479c37e10fadad3739a51862ad2..8d9c6feba660b58c9fd6fd6aa37a8b69831364fd 100644 (file)
@@ -61,10 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                  struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode)
 {
+       enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+               : AMDGPU_IB_POOL_DELAYED;
        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
        int r;
 
-       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
        if (r)
                return r;
 
@@ -90,11 +92,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 {
        struct amdgpu_ib *ib = p->job->ibs;
        struct drm_sched_entity *entity;
-       struct dma_fence *f, *tmp;
        struct amdgpu_ring *ring;
+       struct dma_fence *f;
        int r;
 
-       entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+       entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
        ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
 
        WARN_ON(ib->length_dw == 0);
@@ -104,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        if (r)
                goto error;
 
-       if (p->direct) {
-               tmp = dma_fence_get(f);
-               swap(p->vm->last_direct, tmp);
+       if (p->unlocked) {
+               struct dma_fence *tmp = dma_fence_get(f);
+
+               swap(p->vm->last_unlocked, f);
                dma_fence_put(tmp);
        } else {
-               dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+               amdgpu_bo_fence(p->vm->root.base.bo, f, true);
        }
 
-       if (fence && !p->direct)
+       if (fence && !p->immediate)
                swap(*fence, f);
        dma_fence_put(f);
        return 0;
@@ -142,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
        src += p->num_dw_left * 4;
 
        pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-       trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+       trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
 
        amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
 }
@@ -169,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
        struct amdgpu_ib *ib = p->job->ibs;
 
        pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+       trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
        if (count < 3) {
                amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
                                    count, incr);
@@ -198,6 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                 uint64_t addr, unsigned count, uint32_t incr,
                                 uint64_t flags)
 {
+       enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+               : AMDGPU_IB_POOL_DELAYED;
        unsigned int i, ndw, nptes;
        uint64_t *pte;
        int r;
@@ -223,7 +228,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+                                                    &p->job);
                        if (r)
                                return r;
 
index 82a3299e53c042f6c8a5f8e688e0f36a15ca60e4..d399e58931705c9da41e1361be76041adb947ad2 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Christian König
  */
 
+#include <linux/dma-mapping.h>
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_atomfirmware.h"
@@ -148,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
                   amdgpu_mem_info_vram_vendor, NULL);
 
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+       &dev_attr_mem_info_vram_total.attr,
+       &dev_attr_mem_info_vis_vram_total.attr,
+       &dev_attr_mem_info_vram_used.attr,
+       &dev_attr_mem_info_vis_vram_used.attr,
+       &dev_attr_mem_info_vram_vendor.attr,
+       NULL
+};
+
 /**
  * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
  *
@@ -172,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
        man->priv = mgr;
 
        /* Add the two VRAM-related sysfs files */
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_total\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_used\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
-               return ret;
-       }
-       ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
-       if (ret) {
-               DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
-               return ret;
-       }
+       ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+       if (ret)
+               DRM_ERROR("Failed to register sysfs\n");
 
        return 0;
 }
@@ -219,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
        spin_unlock(&mgr->lock);
        kfree(mgr);
        man->priv = NULL;
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
-       device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+       sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
        return 0;
 }
 
@@ -458,6 +442,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        mem->mm_node = NULL;
 }
 
+/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: the other device (importer) the table is mapped for
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation. Returns 0 on
+ * success or a negative error code; on failure nothing is left mapped
+ * or allocated.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+                             struct ttm_mem_reg *mem,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table **sgt)
+{
+       struct drm_mm_node *node;
+       struct scatterlist *sg;
+       int num_entries = 0;
+       unsigned int pages;
+       int i, r;
+
+       /* The original allocated sizeof(*sg), i.e. a struct scatterlist,
+        * while the object stored here is a struct sg_table — wrong size,
+        * working only by accident of struct layout. Allocate the correct
+        * object.
+        */
+       *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+       if (!*sgt)
+               return -ENOMEM;
+
+       /* One sg entry per drm_mm_node backing this allocation. */
+       for (pages = mem->num_pages, node = mem->mm_node;
+            pages; pages -= node->size, ++node)
+               ++num_entries;
+
+       r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+       if (r)
+               goto error_free;
+
+       /* Zero the lengths first so the error path below can tell mapped
+        * entries from untouched ones.
+        */
+       for_each_sg((*sgt)->sgl, sg, num_entries, i)
+               sg->length = 0;
+
+       node = mem->mm_node;
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               phys_addr_t phys = (node->start << PAGE_SHIFT) +
+                       adev->gmc.aper_base;
+               size_t size = node->size << PAGE_SHIFT;
+               dma_addr_t addr;
+
+               ++node;
+               /* Map the VRAM aperture range as MMIO; CPU cache syncing is
+                * skipped — presumably because VRAM is not CPU-cached.
+                */
+               addr = dma_map_resource(dev, phys, size, dir,
+                                       DMA_ATTR_SKIP_CPU_SYNC);
+               r = dma_mapping_error(dev, addr);
+               if (r)
+                       goto error_unmap;
+
+               /* No struct page backs this range; only the DMA address and
+                * length fields of the entry are meaningful.
+                */
+               sg_set_page(sg, NULL, size, 0);
+               sg_dma_address(sg) = addr;
+               sg_dma_len(sg) = size;
+       }
+       return 0;
+
+error_unmap:
+       for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+               if (!sg->length)
+                       continue;
+
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       }
+       sg_free_table(*sgt);
+
+error_free:
+       kfree(*sgt);
+       return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: device the table was mapped for
+ * @dir: dma direction that was used at map time
+ * @sgt: sg table to free
+ *
+ * Unmap every entry and free a sg table previously created by
+ * amdgpu_vram_mgr_alloc_sgt(). (The original header was a copy-paste of
+ * the alloc function's kernel-doc.)
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+                             struct device *dev,
+                             enum dma_data_direction dir,
+                             struct sg_table *sgt)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+               dma_unmap_resource(dev, sg->dma_address,
+                                  sg->length, dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
 /**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
index 95b3327168acb0354b4e03c09cfa3be041fe2f8a..91837a9913193cb737e036e0c93a424642f6e123 100644 (file)
@@ -325,9 +325,18 @@ success:
 static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
                                          struct amdgpu_hive_info *hive)
 {
+       char node[10];
+       memset(node, 0, sizeof(node));
+
        device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
-       sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
-       sysfs_remove_link(hive->kobj, adev->ddev->unique);
+       device_remove_file(adev->dev, &dev_attr_xgmi_error);
+
+       if (adev != hive->adev)
+               sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
+
+       sprintf(node, "node%d", hive->number_devices);
+       sysfs_remove_link(hive->kobj, node);
+
 }
 
 
@@ -373,7 +382,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 
        if (lock)
                mutex_lock(&tmp->hive_lock);
-       tmp->pstate = -1;
+       tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+       tmp->hi_req_gpu = NULL;
+       /*
+        * The hive pstate on boot is high in vega20, so we have to go to
+        * low pstate after boot.
+        */
+       tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
        mutex_unlock(&xgmi_mutex);
 
        return tmp;
@@ -383,56 +398,59 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 {
        int ret = 0;
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
-       struct amdgpu_device *tmp_adev;
-       bool update_hive_pstate = true;
-       bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+       struct amdgpu_device *request_adev = hive->hi_req_gpu ?
+                                               hive->hi_req_gpu : adev;
+       bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+       bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
 
-       if (!hive)
+       /*
+        * FIXME: firmware bug - pstate switching is temporarily disabled.
+        * NOTE(review): this early return makes everything below unreachable,
+        * including the !hive NULL check, which is bypassed even though
+        * hive->hi_req_gpu is already dereferenced above - confirm hive can
+        * never be NULL here before re-enabling this path.
+        */
+       return 0;
+
+       if (!hive || adev->asic_type != CHIP_VEGA20)
                return 0;
 
        mutex_lock(&hive->hive_lock);
 
-       if (hive->pstate == pstate) {
-               adev->pstate = is_high_pstate ? pstate : adev->pstate;
+       if (is_hi_req)
+               hive->hi_req_count++;
+       else
+               hive->hi_req_count--;
+
+       /*
+        * Vega20 only needs single peer to request pstate high for the hive to
+        * go high but all peers must request pstate low for the hive to go low
+        */
+       if (hive->pstate == pstate ||
+                       (!is_hi_req && hive->hi_req_count && !init_low))
                goto out;
-       }
 
-       dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+       dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
 
-       ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+       ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
        if (ret) {
-               dev_err(adev->dev,
+               dev_err(request_adev->dev,
                        "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
-                       adev->gmc.xgmi.node_id,
-                       adev->gmc.xgmi.hive_id, ret);
+                       request_adev->gmc.xgmi.node_id,
+                       request_adev->gmc.xgmi.hive_id, ret);
                goto out;
        }
 
-       /* Update device pstate */
-       adev->pstate = pstate;
-
-       /*
-        * Update the hive pstate only all devices of the hive
-        * are in the same pstate
-        */
-       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-               if (tmp_adev->pstate != adev->pstate) {
-                       update_hive_pstate = false;
-                       break;
-               }
-       }
-       if (update_hive_pstate || is_high_pstate)
+       if (init_low)
+               hive->pstate = hive->hi_req_count ?
+                                       hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+       else {
                hive->pstate = pstate;
-
+               hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+                                                       adev : NULL;
+       }
 out:
        mutex_unlock(&hive->hive_lock);
-
        return ret;
 }
 
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
 {
-       int ret = -EINVAL;
+       int ret;
 
        /* Each psp need to set the latest topology */
        ret = psp_xgmi_set_topology_info(&adev->psp,
@@ -507,9 +525,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                goto exit;
        }
 
-       /* Set default device pstate */
-       adev->pstate = -1;
-
        top_info = &adev->psp.xgmi_context.top_info;
 
        list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -577,14 +592,14 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
        if (!hive)
                return -EINVAL;
 
-       if (!(hive->number_devices--)) {
+       task_barrier_rem_task(&hive->tb);
+       amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+       mutex_unlock(&hive->hive_lock);
+
+       if(!(--hive->number_devices)){
                amdgpu_xgmi_sysfs_destroy(adev, hive);
                mutex_destroy(&hive->hive_lock);
                mutex_destroy(&hive->reset_lock);
-       } else {
-               task_barrier_rem_task(&hive->tb);
-               amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
-               mutex_unlock(&hive->hive_lock);
        }
 
        return psp_xgmi_terminate(&adev->psp);
@@ -604,6 +619,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
+       amdgpu_xgmi_reset_ras_error_count(adev);
+
        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
                if (!adev->gmc.xgmi.ras_if)
@@ -641,31 +658,34 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr)
 {
-       uint32_t df_inst_id;
-       uint64_t dram_base_addr = 0;
-       const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
-
-       if ((!df_funcs)                 ||
-           (!df_funcs->get_df_inst_id) ||
-           (!df_funcs->get_dram_base_addr)) {
-               dev_warn(adev->dev,
-                        "XGMI: relative phy_addr algorithm is not supported\n");
-               return addr;
-       }
-
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
-               dev_warn(adev->dev,
-                        "failed to disable DF-Cstate, DF register may not be accessible\n");
-               return addr;
-       }
+       struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+       return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
 
-       df_inst_id = df_funcs->get_df_inst_id(adev);
-       dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+       WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+       WREG32_PCIE(pcs_status_reg, 0);
+}
 
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
-               dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+       uint32_t i;
 
-       return addr + dram_base_addr;
+       switch (adev->asic_type) {
+       case CHIP_ARCTURUS:
+               for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+                       pcs_clear_status(adev,
+                                        xgmi_pcs_err_status_reg_arct[i]);
+               break;
+       case CHIP_VEGA20:
+               for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+                       pcs_clear_status(adev,
+                                        xgmi_pcs_err_status_reg_vg20[i]);
+               break;
+       default:
+               break;
+       }
 }
 
 static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -758,6 +778,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
+       amdgpu_xgmi_reset_ras_error_count(adev);
+
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
 
index 4a92067fe595985609fc4f5b29a4a06704631728..6999eab16a72090c184736d432ffc857e4a935d6 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
+
 struct amdgpu_hive_info {
        uint64_t                hive_id;
        struct list_head        device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
        struct kobject *kobj;
        struct device_attribute dev_attr;
        struct amdgpu_device *adev;
-       int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+       int hi_req_count;
+       struct amdgpu_device *hi_req_gpu;
        struct task_barrier tb;
+       enum {
+               AMDGPU_XGMI_PSTATE_MIN,
+               AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+               AMDGPU_XGMI_PSTATE_UNKNOWN
+       } pstate;
 };
 
 struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr);
 int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                      void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
 
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                struct amdgpu_device *bo_adev)
index cae426c7c0863b06c7c50fcbefae95ce37262ba0..4cfc786699c7fcaac2b903c463ea85bbd9379812 100644 (file)
@@ -54,6 +54,8 @@
 #define PLL_INDEX      2
 #define PLL_DATA       3
 
+#define ATOM_CMD_TIMEOUT_SEC   20
+
 typedef struct {
        struct atom_context *ctx;
        uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
                        cjiffies = jiffies;
                        if (time_after(cjiffies, ctx->last_jump_jiffies)) {
                                cjiffies -= ctx->last_jump_jiffies;
-                               if ((jiffies_to_msecs(cjiffies) > 10000)) {
-                                       DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+                               if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
+                                       DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
+                                                 ATOM_CMD_TIMEOUT_SEC);
                                        ctx->abort = true;
                                }
                        } else {
index 62635e58e45eede7c7a48e1b0c35372f8607c896..fe306d0f73f7a14a521183f667360d3d56f027cc 100644 (file)
@@ -1809,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
                >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2177,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
 
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-       cik_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
index 580d3f93d67093ec1925e671d8fea7d2a6f5e2ac..20f108818b2b966f5893f05e7b72e233d0b3054a 100644 (file)
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1313,7 +1313,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
                                      uint64_t src_offset,
                                      uint64_t dst_offset,
-                                     uint32_t byte_count)
+                                     uint32_t byte_count,
+                                     bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = byte_count;
index cee6e8a3ad9c9227c81dcfe2f568daf508686219..5f3f6ebfb3876222e69804bba5ffdc899c35288d 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
index 2512e7ebfedf2ef00be981a5528b5981dba555ec..e38744d06f4e51c390479f11c7a0eb65ca98a781 100644 (file)
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
index 0dde22db9848690f1e50c3be0e819ed03151aece..2584ff74423bc391e9971a514ef641ec7185c13d 100644 (file)
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
index 84219534bd38c6ac3f73749b977605c7b161b593..d05c39f9ae40ebda7975c507d21dd9d00c02764e 100644 (file)
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 
 }
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  CUR_CONTROL__CURSOR_EN_MASK |
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              CUR_CONTROL__CURSOR_EN_MASK |
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 }
 
index 3a640702d7d193ad44bb216fe7ace2b0507df236..ad0f8adb6a2b61d7c06674db2c18063b3ed480ce 100644 (file)
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
 
 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));
 
-       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
-                  CUR_CONTROL__CURSOR_EN_MASK |
-                  (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
-                  (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+       WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+              CUR_CONTROL__CURSOR_EN_MASK |
+              (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+              (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
 
 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
index 13e12be667fc2a7148a231cdbec3559410daa864..d5ff7b6331ff98c7636cf04ad31be92e0f4b5a8c 100644 (file)
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       drm_crtc_vblank_off(crtc);
 
+       amdgpu_crtc->enabled = false;
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
        static const struct mode_size {
                int w;
                int h;
-       } common_modes[17] = {
+       } common_modes[21] = {
                { 640,  480},
                { 720,  480},
                { 800,  600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
                {1680, 1050},
                {1600, 1200},
                {1920, 1080},
-               {1920, 1200}
+               {1920, 1200},
+               {4096, 3112},
+               {3656, 2664},
+               {3840, 2160},
+               {4096, 2160},
        };
 
-       for (i = 0; i < 17; i++) {
+       for (i = 0; i < 21; i++) {
                mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
                drm_mode_probed_add(connector, mode);
        }
index 5a1bd8ed1a6c51ab34d0d65f834c3d5a8ccd666a..a7b8292cefee2998fe3d03068a1eb3e20aa6408f 100644 (file)
@@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
        }
 }
 
-static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
-                                          uint32_t df_inst)
-{
-       uint32_t base_addr_reg_val      = 0;
-       uint64_t base_addr              = 0;
-
-       base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
-                                       df_inst * DF_3_6_SMN_REG_INST_DIST);
-
-       if (REG_GET_FIELD(base_addr_reg_val,
-                         DF_CS_UMC_AON0_DramBaseAddress0,
-                         AddrRngVal) == 0) {
-               DRM_WARN("address range not valid");
-               return 0;
-       }
-
-       base_addr = REG_GET_FIELD(base_addr_reg_val,
-                                 DF_CS_UMC_AON0_DramBaseAddress0,
-                                 DramBaseAddr);
-
-       return base_addr << 28;
-}
-
-static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
-{
-       uint32_t xgmi_node_id   = 0;
-       uint32_t df_inst_id     = 0;
-
-       /* Walk through DF dst nodes to find current XGMI node */
-       for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
-
-               xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
-                                          df_inst_id * DF_3_6_SMN_REG_INST_DIST);
-               xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
-                                            DF_CS_UMC_AON0_DramLimitAddress0,
-                                            DstFabricID);
-
-               /* TODO: establish reason dest fabric id is offset by 7 */
-               xgmi_node_id = xgmi_node_id >> 7;
-
-               if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
-                       break;
-       }
-
-       if (df_inst_id == DF_3_6_INST_CNT) {
-               DRM_WARN("cant match df dst id with gpu node");
-               return 0;
-       }
-
-       return df_inst_id;
-}
-
 const struct amdgpu_df_funcs df_v3_6_funcs = {
        .sw_init = df_v3_6_sw_init,
        .sw_fini = df_v3_6_sw_fini,
@@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
        .pmc_get_count = df_v3_6_pmc_get_count,
        .get_fica = df_v3_6_get_fica,
        .set_fica = df_v3_6_set_fica,
-       .get_dram_base_addr = df_v3_6_get_dram_base_addr,
-       .get_df_inst_id = df_v3_6_get_df_inst_id
 };
index 0e0daf0021b60aa9bf0b94ccecb40388f257133d..bd5dd4f6431103b8fded2463a37717870da6f6f8 100644 (file)
@@ -138,6 +138,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
        /* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,11 +1328,1691 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
        /* Pending on emulation bring up */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
 {
        /* Pending on emulation bring up: NV12 GC 10.1.2 golden register
         * overrides are not yet available, so this table is intentionally
         * empty (programming an empty sequence is a no-op).
         */
 };
 
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
         (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -301,7 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_0_nv10,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_0_nv10,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
                break;
        case CHIP_NAVI14:
                soc15_program_register_sequence(adev,
@@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_nv14,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_1_nv14,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
                break;
        case CHIP_NAVI12:
                soc15_program_register_sequence(adev,
@@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_2_nv12,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_rlc_spm_10_1_2_nv12,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
                break;
        default:
                break;
@@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 
        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
        return 0;
@@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 {
        int r;
        unsigned irq_type;
-       struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       struct amdgpu_ring *ring;
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
-
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -1829,9 +4578,9 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 
        /* csib */
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
-                    adev->gfx.rlc.clear_state_gpu_addr >> 32);
+                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
-                    adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+                        adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
        WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
 
        return 0;
@@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-       if (!enable) {
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
        WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
 
        for (i = 0; i < adev->usec_timeout; i++) {
@@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
                             (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
                              CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -3802,14 +6540,16 @@ static int gfx_v10_0_hw_init(void *handle)
                 * loaded firstly, so in direct type, it has to load smc ucode
                 * here before rlc.
                 */
-               r = smu_load_microcode(&adev->smu);
-               if (r)
-                       return r;
+               if (adev->smu.ppt_funcs != NULL) {
+                       r = smu_load_microcode(&adev->smu);
+                       if (r)
+                               return r;
 
-               r = smu_check_fw_status(&adev->smu);
-               if (r) {
-                       pr_err("SMC firmware status is not correct\n");
-                       return r;
+                       r = smu_check_fw_status(&adev->smu);
+                       if (r) {
+                               pr_err("SMC firmware status is not correct\n");
+                               return r;
+                       }
                }
        }
 
@@ -4292,14 +7032,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
-       u32 data;
+       u32 reg, data;
 
-       data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+       reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(reg);
+       else
+               data = RREG32(reg);
 
        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
 
-       WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       else
+               WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -4341,6 +7088,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
        .reset = gfx_v10_0_rlc_reset,
        .start = gfx_v10_0_rlc_start,
        .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
+       .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
+       .set_safe_mode = gfx_v10_0_set_safe_mode,
+       .unset_safe_mode = gfx_v10_0_unset_safe_mode,
+       .init = gfx_v10_0_rlc_init,
+       .get_csb_size = gfx_v10_0_get_csb_size,
+       .get_csb_buffer = gfx_v10_0_get_csb_buffer,
+       .resume = gfx_v10_0_rlc_resume,
+       .stop = gfx_v10_0_rlc_stop,
+       .reset = gfx_v10_0_rlc_reset,
+       .start = gfx_v10_0_rlc_start,
+       .update_spm_vmid = gfx_v10_0_update_spm_vmid,
        .rlcg_wreg = gfx_v10_rlcg_wreg,
        .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
@@ -4350,6 +7111,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_PG_STATE_GATE);
+
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
@@ -4366,6 +7131,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
@@ -4678,7 +7446,8 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0);
 }
 
-static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
+                                        uint32_t flags)
 {
        uint32_t dw2 = 0;
 
@@ -4686,8 +7455,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
                gfx_v10_0_ring_emit_ce_meta(ring,
                                    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
-       gfx_v10_0_ring_emit_tmz(ring, true);
-
        dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                /* set load_global_config & load_global_uconfig */
@@ -4844,16 +7611,19 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
                                           sizeof(de_payload) >> 2);
 }
 
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                   bool secure)
 {
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-       amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
 
-static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                    uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -4862,9 +7632,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5250,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int gcr_cntl =
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+       /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+       amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+       amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
        .name = "gfx_v10_0",
        .early_init = gfx_v10_0_early_init,
@@ -5297,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               8, /* gfx_v10_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5314,11 +8108,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
        .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
        .preempt_ib = gfx_v10_0_ring_preempt_ib,
-       .emit_tmz = gfx_v10_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v10_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5338,7 +8133,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v10_0_ring_emit_vm_flush */
-               8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+               8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+               8, /* gfx_v10_0_emit_mem_sync */
        .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
        .emit_ib = gfx_v10_0_ring_emit_ib_compute,
        .emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5353,6 +8149,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+       .emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5439,9 +8236,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-       case CHIP_NAVI12:
                adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
                break;
+       case CHIP_NAVI12:
+               adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
+               break;
        default:
                break;
        }
index 31f44d05e606d1e367da11ff176b4279c81dc2a4..79c52c7a02e3a0bd7779c136beafa591daa0cdaf 100644 (file)
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1950,7 +1951,6 @@ err1:
 
 static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        if (enable) {
                WREG32(mmCP_ME_CNTL, 0);
        } else {
@@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                                      CP_ME_CNTL__PFP_HALT_MASK |
                                      CP_ME_CNTL__CE_HALT_MASK));
                WREG32(mmSCRATCH_UMSK, 0);
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
                sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, irq_type);
+                                    &adev->gfx.eop_irq, irq_type,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -3466,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
        .name = "gfx_v6_0",
        .early_init = gfx_v6_0_early_init,
@@ -3496,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
                14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
                7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-               3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
        .emit_ib = gfx_v6_0_ring_emit_ib,
        .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3507,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
        .insert_nop = amdgpu_ring_insert_nop,
        .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
        .emit_wreg = gfx_v6_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3520,7 +3533,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
                5 + 5 + /* hdp flush / invalidate */
                7 + /* gfx_v6_0_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
-               14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
        .emit_ib = gfx_v6_0_ring_emit_ib,
        .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3530,6 +3544,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
        .test_ib = gfx_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .emit_wreg = gfx_v6_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
index 733d398c61ccb7b6b94c4b4384f4fd4f6f1d4ddb..0cc011f9190d3eaabac0adc40f6883cff9462724 100644 (file)
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -2431,15 +2432,12 @@ err1:
  */
 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
-       if (enable) {
+       if (enable)
                WREG32(mmCP_ME_CNTL, 0);
-       } else {
-               WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
+       else
+               WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+                                     CP_ME_CNTL__PFP_HALT_MASK |
+                                     CP_ME_CNTL__CE_HALT_MASK));
        udelay(50);
 }
 
@@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
  */
 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
-       if (enable) {
+       if (enable)
                WREG32(mmCP_MEC_CNTL, 0);
-       } else {
-               WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
-       }
+       else
+               WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+                                      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
        udelay(50);
 }
 
@@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                       &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -5001,6 +4998,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);    /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xff);          /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A);    /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .name = "gfx_v7_0",
        .early_init = gfx_v7_0_early_init,
@@ -5033,7 +5056,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
                12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
                7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-               3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5048,6 +5072,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
        .emit_wreg = gfx_v7_0_ring_emit_wreg,
        .soft_recovery = gfx_v7_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v7_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5064,7 +5089,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
                5 + /* hdp invalidate */
                7 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
-               7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7, /* gfx_v7_0_emit_mem_sync_compute */
        .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
@@ -5077,6 +5103,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v7_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
index fc32586ef80b1a5c91117b5f469a17826f349408..1d4128227ffd6d1fda53755e91b5bf39c5490fe0 100644 (file)
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
@@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
 
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                       &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
                }
 
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
-                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        u32 tmp = RREG32(mmCP_ME_CNTL);
 
        if (enable) {
@@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32(mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32(mmCP_MEC_CNTL, 0);
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
        u32 data;
 
-       data = RREG32(mmRLC_SPM_VMID);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
+       else
+               data = RREG32(mmRLC_SPM_VMID);
 
        data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
 
-       WREG32(mmRLC_SPM_VMID, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
+       else
+               WREG32(mmRLC_SPM_VMID, data);
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
                ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
 }
 
-static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                   uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6815,6 +6817,34 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA |
+                         PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA |
+                         PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);    /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xff);          /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);             /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A);    /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
        .name = "gfx_v8_0",
        .early_init = gfx_v8_0_early_init,
@@ -6861,7 +6891,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                12 + 12 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6879,6 +6910,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
        .soft_recovery = gfx_v8_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v8_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6895,7 +6927,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
-               7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+               7, /* gfx_v8_0_emit_mem_sync_compute */
        .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6908,6 +6941,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
index d2d9dce68c2f186fc6544e37551008c451eed06a..711e9dd1970555beb79c47f688ad9dea7b5b5d56 100644 (file)
 
 #include "gfx_v9_4.h"
 
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
 
-#define mmPWR_MISC_CNTL_STATUS                                 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT       0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT         0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK         0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK           0x00000006L
-
 #define mmGCEA_PROBE_MAP                        0x070c
 #define mmGCEA_PROBE_MAP_BASE_IDX               0
 
@@ -511,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -963,7 +959,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1277,7 +1274,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
        case CHIP_VEGA20:
                break;
        case CHIP_RAVEN:
-               if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+               if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+                     (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
                    ((!is_raven_kicker(adev) &&
                      adev->gfx.rlc_fw_version < 531) ||
                     (adev->gfx.rlc_feature_version < 1) ||
@@ -1620,9 +1618,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "vega20";
                break;
        case CHIP_RAVEN:
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
@@ -2122,7 +2120,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
                else
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
@@ -2199,6 +2197,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+       unsigned int hw_prio;
 
        ring = &adev->gfx.compute_ring[ring_id];
 
@@ -2217,10 +2216,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
-
+       hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type);
+                            &adev->gfx.eop_irq, irq_type, hw_prio);
        if (r)
                return r;
 
@@ -2314,7 +2314,9 @@ static int gfx_v9_0_sw_init(void *handle)
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+                                    &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -2532,7 +2534,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
                break;
        default:
                break;
-       };
+       }
 }
 
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -2967,8 +2969,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
         */
        if (adev->gfx.rlc.is_rlc_v2_1) {
                if (adev->asic_type == CHIP_VEGA12 ||
-                   (adev->asic_type == CHIP_RAVEN &&
-                    adev->rev_id >= 8))
+                   (adev->apu_flags & AMD_APU_IS_RAVEN2))
                        gfx_v9_1_init_rlc_save_restore_list(adev);
                gfx_v9_0_enable_save_restore_machine(adev);
        }
@@ -3104,16 +3105,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
        u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
 
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-       if (!enable) {
-               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].sched.ready = false;
-       }
        WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
 }
@@ -3309,15 +3305,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-
        if (enable) {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].sched.ready = false;
                adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
@@ -3387,11 +3379,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       ring->has_high_prio = true;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               } else {
-                       ring->has_high_prio = false;
                }
        }
 }
@@ -4058,13 +4047,18 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
 {
        signed long r, cnt = 0;
        unsigned long flags;
-       uint32_t seq;
+       uint32_t seq, reg_val_offs = 0;
+       uint64_t value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
+       if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+               pr_err("critical bug! too many kiq readers\n");
+               goto failed_unlock;
+       }
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 9 |     /* src: register*/
@@ -4074,10 +4068,13 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
-       amdgpu_fence_emit_polling(ring, &seq);
+                               reg_val_offs * 4));
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
@@ -4103,10 +4100,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;
 
-       return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
-               (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+       mb();
+       value = (uint64_t)adev->wb.wb[reg_val_offs] |
+               (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
+       amdgpu_device_wb_free(adev, reg_val_offs);
+       return value;
 
+failed_undo:
+       amdgpu_ring_undo(ring);
+failed_unlock:
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_read:
+       if (reg_val_offs)
+               amdgpu_device_wb_free(adev, reg_val_offs);
        pr_err("failed to read gpu clock\n");
        return ~0;
 }
@@ -4491,7 +4497,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
@@ -4962,14 +4969,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 
 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
-       u32 data;
+       u32 reg, data;
 
-       data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+       reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               data = RREG32_NO_KIQ(reg);
+       else
+               data = RREG32(reg);
 
        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
 
-       WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       if (amdgpu_sriov_is_pp_one_vf(adev))
+               WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+       else
+               WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -5424,10 +5438,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
        amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
 }
 
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+                                  bool secure)
 {
+       uint32_t v = secure ? FRAME_TMZ : 0;
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-       amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+       amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
 }
 
 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5437,8 +5454,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
        if (amdgpu_sriov_vf(ring->adev))
                gfx_v9_0_ring_emit_ce_meta(ring);
 
-       gfx_v9_0_ring_emit_tmz(ring, true);
-
        dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                /* set load_global_config & load_global_uconfig */
@@ -5489,10 +5504,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
                ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
 }
 
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+                                   uint32_t reg_val_offs)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
@@ -5501,9 +5516,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-                               kiq->reg_val_offs * 4));
+                               reg_val_offs * 4));
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6404,15 +6419,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                vml2_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, vml2_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                vml2_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, vml2_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6424,16 +6439,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                vml2_walker_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, vml2_walker_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                vml2_walker_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, vml2_walker_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6444,8 +6459,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                atc_l2_cache_2m_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+                               sec_count);
                        err_data->ce_count += sec_count;
                }
        }
@@ -6456,15 +6472,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
-                                atc_l2_cache_4k_mems[i], sec_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+                               sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = (data & 0x00018000L) >> 0xf;
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
-                                atc_l2_cache_4k_mems[i], ded_count);
+                       dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+                               "DED %d\n", i, atc_l2_cache_4k_mems[i],
+                               ded_count);
                        err_data->ue_count += ded_count;
                }
        }
@@ -6477,7 +6495,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+       const struct soc15_reg_entry *reg,
        uint32_t se_id, uint32_t inst_id, uint32_t value,
        uint32_t *sec_count, uint32_t *ded_count)
 {
@@ -6494,7 +6513,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
                                gfx_v9_0_ras_fields[i].sec_count_mask) >>
                                gfx_v9_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+                       dev_info(adev->dev, "GFX SubBlock %s, "
+                               "Instance[%d][%d], SEC %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                sec_cnt);
@@ -6505,7 +6525,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
                                gfx_v9_0_ras_fields[i].ded_count_mask) >>
                                gfx_v9_0_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+                       dev_info(adev->dev, "GFX SubBlock %s, "
+                               "Instance[%d][%d], DED %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                ded_cnt);
@@ -6594,9 +6615,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                reg_value =
                                        RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                                if (reg_value)
-                                       gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
-                                                       j, k, reg_value,
-                                                       &sec_count, &ded_count);
+                                       gfx_v9_0_ras_error_count(adev,
+                                               &gfx_v9_0_edc_counter_regs[i],
+                                               j, k, reg_value,
+                                               &sec_count, &ded_count);
                        }
                }
        }
@@ -6612,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int cp_coher_cntl =
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+       /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
        .name = "gfx_v9_0",
        .early_init = gfx_v9_0_early_init,
@@ -6658,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               7, /* gfx_v9_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6674,11 +6716,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
        .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
        .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
-       .emit_tmz = gfx_v9_0_ring_emit_tmz,
+       .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v9_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6698,7 +6741,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v9_0_ring_emit_vm_flush */
-               8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+               8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+               7, /* gfx_v9_0_emit_mem_sync */
        .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_ib = gfx_v9_0_ring_emit_ib_compute,
        .emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6713,6 +6757,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+       .emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6836,7 +6881,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
                adev->gds.gds_compute_max_wave_id = 0x27f;
                break;
        case CHIP_RAVEN:
-               if (adev->rev_id >= 0x8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
                else
                        adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
old mode 100644 (file)
new mode 100755 (executable)
index dce945e..46351db
@@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 vml2_walker_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 vml2_walker_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
 
                sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 utcl2_router_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
 
                ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 utcl2_router_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 atc_l2_cache_2m_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 atc_l2_cache_2m_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
                                          SEC_COUNT);
                if (sec_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, SEC %d\n", i,
                                 atc_l2_cache_4k_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }
@@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
                ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
                                          DED_COUNT);
                if (ded_count) {
-                       DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+                       dev_info(adev->dev,
+                                "Instance[%d]: SubBlock %s, DED %d\n", i,
                                 atc_l2_cache_4k_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
@@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
+                                   const struct soc15_reg_entry *reg,
                                    uint32_t se_id, uint32_t inst_id,
                                    uint32_t value, uint32_t *sec_count,
                                    uint32_t *ded_count)
@@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
                sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
                          gfx_v9_4_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+                       dev_info(adev->dev,
+                                "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
                                 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
                                 sec_cnt);
                        *sec_count += sec_cnt;
@@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
                ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
                          gfx_v9_4_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+                       dev_info(adev->dev,
+                                "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
                                 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
                                 ded_cnt);
                        *ded_count += ded_cnt;
@@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
                                reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
                                        gfx_v9_4_edc_counter_regs[i]));
                                if (reg_value)
-                                       gfx_v9_4_ras_error_count(
+                                       gfx_v9_4_ras_error_count(adev,
                                                &gfx_v9_4_edc_counter_regs[i],
                                                j, k, reg_value, &sec_count,
                                                &ded_count);
index 1a2f18b908fee4040a3e4c93953331ca6c3ead14..6682b843bafe467ac3a4c87bc87a6bb73b197d30 100644 (file)
@@ -80,7 +80,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                        min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-               if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
                        * Raven2 has a HW issue that it is unable to use the
                        * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
index 9775eca6fe434e044d21741eb5123ca16f2ec340..ba2b7ac0c02da0af321117e2ce5ec7109e383af8 100644 (file)
@@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                        dev_err(adev->dev,
                                "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
+                       dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+                               REG_GET_FIELD(status,
+                               GCVM_L2_PROTECTION_FAULT_STATUS, CID));
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -369,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
-       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+                                    &job);
        if (r)
                goto error_alloc;
 
@@ -423,7 +427,13 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
-               amdgpu_fence_emit_polling(ring, &seq);
+               r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+               if (r) {
+                       amdgpu_ring_undo(ring);
+                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       return -ETIME;
+               }
+
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
@@ -676,17 +686,23 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 {
-       /* Could aper size report 0 ? */
-       adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
-       adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+       int r;
 
        /* size in MB on si */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
-       adev->gmc.visible_vram_size = adev->gmc.aper_size;
+
+       if (!(adev->flags & AMD_IS_APU)) {
+               r = amdgpu_device_resize_fb_bar(adev);
+               if (r)
+                       return r;
+       }
+       adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
        /* In case the PCI BAR is larger than the actual amount of vram */
+       adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
index b205039350b6c9d469ecdb3d738a71ea3cb40f17..a75e472b4a81ea8494eb4341b606b409d8cea594 100644 (file)
@@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 #define MC_SEQ_MISC0__MT__HBM    0x60000000
 #define MC_SEQ_MISC0__MT__DDR3   0xB0000000
 
-
-static const u32 crtc_offsets[6] =
-{
-       SI_CRTC0_REGISTER_OFFSET,
-       SI_CRTC1_REGISTER_OFFSET,
-       SI_CRTC2_REGISTER_OFFSET,
-       SI_CRTC3_REGISTER_OFFSET,
-       SI_CRTC4_REGISTER_OFFSET,
-       SI_CRTC5_REGISTER_OFFSET
-};
-
 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
 {
        u32 blackout;
@@ -858,7 +847,7 @@ static int gmc_v6_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
-               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+               dev_warn(adev->dev, "No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(44);
index 9da9596a36388d31e4c2cc5fb106df3c962dcd2f..bcd4baecfe115c15f4a35b5b7a313b1c199d2574 100644 (file)
@@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  *
  * Print human readable fault information (CIK).
  */
@@ -1019,7 +1020,7 @@ static int gmc_v7_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
        if (r) {
-               pr_warn("amdgpu: No suitable DMA available\n");
+               pr_warn("No suitable DMA available\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(40);
index 27d83204fa2b00c3a8e663ceb18402ce39ba24e0..26976e50e2a2917c2762bb9a6fe78b3b4a4e1744 100644 (file)
@@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
  *
  * Print human readable fault information (VI).
  */
@@ -1144,7 +1145,7 @@ static int gmc_v8_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
        if (r) {
-               pr_warn("amdgpu: No suitable DMA available\n");
+               pr_warn("No suitable DMA available\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(40);
index 8606f877478f89b8fc7fbff1cf9bed83311a4197..11e93a82131def7ee7f231d1f29b06e81cddaaf4 100644 (file)
@@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                        dev_err(adev->dev,
                                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
+                       dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+                               REG_GET_FIELD(status,
+                               VM_L2_PROTECTION_FAULT_STATUS, CID));
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -438,9 +441,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)) &&
-               (!(adev->asic_type == CHIP_RAVEN &&
-                  adev->rev_id < 0x8 &&
-                  adev->pdev->device == 0x15d8)));
+               (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+                  (adev->apu_flags & AMD_APU_IS_PICASSO))));
 }
 
 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
@@ -618,7 +620,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                                      pasid, 2, all_hub);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
-               amdgpu_fence_emit_polling(ring, &seq);
+               r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+               if (r) {
+                       amdgpu_ring_undo(ring);
+                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       return -ETIME;
+               }
+
                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
index 0debfd9f428c1b236c468277002135bc2a6e7e65..b10c95cad9a2edfd5ebb3bdecdc031c5bfe8f685 100644 (file)
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
 
        ring = &adev->jpeg.inst->ring_dec;
        sprintf(ring->name, "jpeg_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+                            0, AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
index 6173951db7b4527875a6cb53b043a124c1ae8227..e67d09cb1b03ecc06ad1b4ae457c910317aa2244 100644 (file)
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+                            0, AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle)
 static int jpeg_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
 
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index c04c2078a7c1f3655762e871c9147db0bafe8b64..713c325604453ded7fa57acad5c6246b9050b85f 100644 (file)
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
                sprintf(ring->name, "jpeg_dec_%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+                                    0, AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
@@ -267,7 +268,6 @@ static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
        data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
        data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
                | JPEG_CGC_GATE__JPEG2_DEC_MASK
-               | JPEG_CGC_GATE__JPEG_ENC_MASK
                | JPEG_CGC_GATE__JMCIF_MASK
                | JPEG_CGC_GATE__JRBBM_MASK);
        WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
old mode 100644 (file)
new mode 100755 (executable)
index 396c2a6..4057672
@@ -96,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-       if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+       if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
    { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
 };
 
-static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+       const struct soc15_reg_entry *reg,
        uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
 {
        uint32_t i;
@@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v1_0_ras_fields[i].sec_count_mask) >>
                                mmhub_v1_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+                       dev_info(adev->dev,
+                               "MMHUB SubBlock %s, SEC %d\n",
                                mmhub_v1_0_ras_fields[i].name,
                                sec_cnt);
                        *sec_count += sec_cnt;
@@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
                                mmhub_v1_0_ras_fields[i].ded_count_mask) >>
                                mmhub_v1_0_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
-                       DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+                       dev_info(adev->dev,
+                               "MMHUB SubBlock %s, DED %d\n",
                                mmhub_v1_0_ras_fields[i].name,
                                ded_cnt);
                        *ded_count += ded_cnt;
@@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
                reg_value =
                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
                if (reg_value)
-                       mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+                       mmhub_v1_0_get_ras_error_count(adev,
+                               &mmhub_v1_0_edc_cnt_regs[i],
                                reg_value, &sec_count, &ded_count);
        }
 
index 37dbe0f2142f54fc7730b212710b4dfea0a9bec3..83b453f5d7176616df5f78346422a2c8197e2bfc 100644 (file)
@@ -26,7 +26,7 @@
 
 #define AI_MAILBOX_POLL_ACK_TIMEDOUT   500
 #define AI_MAILBOX_POLL_MSG_TIMEDOUT   12000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT   500
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT   5000
 
 enum idh_request {
        IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -46,7 +46,8 @@ enum idh_event {
        IDH_SUCCESS,
        IDH_FAIL,
        IDH_QUERY_ALIVE,
-       IDH_EVENT_MAX
+
+       IDH_TEXT_MESSAGE = 255,
 };
 
 extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
index 237fa5e16b7c9c08282636400317a03321a961d3..ce2bf1fb79ed12a2c7a11cb30eb5c8c9f49eebe0 100644 (file)
@@ -30,7 +30,6 @@
 #include "navi10_ih.h"
 #include "soc15_common.h"
 #include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
 
 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
 {
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
  */
 static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
 {
-       return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                               mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+       return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
 }
 
 
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
 {
        u32 reg;
 
-       reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                            mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+       reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;
 
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
                timeout -= 10;
        } while (timeout > 1);
 
-       pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
 
        return -ETIME;
 }
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
              enum idh_request req, u32 data1, u32 data2, u32 data3)
 {
-       u32 reg;
        int r;
        uint8_t trn;
 
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
                }
        } while (trn);
 
-       reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                            mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
-       reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
-                           MSGBUF_DATA, req);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
-                     reg);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
-                               data1);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
-                               data2);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
-                               data3);
-
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+       WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);
 
        /* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
 {
        int r;
+       enum idh_event event = -1;
 
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
-       /* start to check msg if request is idh_req_gpu_init_access */
-       if (req == IDH_REQ_GPU_INIT_ACCESS ||
-               req == IDH_REQ_GPU_FINI_ACCESS ||
-               req == IDH_REQ_GPU_RESET_ACCESS) {
-               r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+       switch (req) {
+       case IDH_REQ_GPU_INIT_ACCESS:
+       case IDH_REQ_GPU_FINI_ACCESS:
+       case IDH_REQ_GPU_RESET_ACCESS:
+               event = IDH_READY_TO_ACCESS_GPU;
+               break;
+       case IDH_REQ_GPU_INIT_DATA:
+               event = IDH_REQ_GPU_INIT_DATA_READY;
+               break;
+       default:
+               break;
+       }
+
+       if (event != -1) {
+               r = xgpu_nv_poll_msg(adev, event);
                if (r) {
-                       pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
-                       return r;
+                       if (req != IDH_REQ_GPU_INIT_DATA) {
+                               pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+                               return r;
+                       }
+                       else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+                               adev->virt.req_init_data_ver = 0;
+               } else {
+                       if (req == IDH_REQ_GPU_INIT_DATA)
+                       {
+                               adev->virt.req_init_data_ver =
+                                       RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+                               /* assume V1 in case host doesn't set version number */
+                               if (adev->virt.req_init_data_ver < 1)
+                                       adev->virt.req_init_data_ver = 1;
+                       }
                }
+
                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
-                               RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-                                       mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+                               RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }
 
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
        return r;
 }
 
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+       return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
 {
-       u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+       u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+       if (state == AMDGPU_IRQ_STATE_ENABLE)
+               tmp |= 2;
+       else
+               tmp &= ~2;
 
-       tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
-                               (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+       WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 
        return 0;
 }
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
 {
-       u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+       u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+       if (state == AMDGPU_IRQ_STATE_ENABLE)
+               tmp |= 1;
+       else
+               tmp &= ~1;
 
-       tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
-                           (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
-       WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+       WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 
        return 0;
 }
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
+       .req_init_data  = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .wait_reset = NULL,
        .trans_msg = xgpu_nv_mailbox_trans_msg,
index 99b15f6865cb8fb1c7d3fda771ba3fce1c5eec55..52605e14a1a530457f0a41b3577a29efc6ba56b1 100644 (file)
 #define __MXGPU_NV_H__
 
 #define NV_MAILBOX_POLL_ACK_TIMEDOUT   500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT   12000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT   500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT   6000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT   5000
+
+enum idh_request {
+       IDH_REQ_GPU_INIT_ACCESS = 1,
+       IDH_REL_GPU_INIT_ACCESS,
+       IDH_REQ_GPU_FINI_ACCESS,
+       IDH_REL_GPU_FINI_ACCESS,
+       IDH_REQ_GPU_RESET_ACCESS,
+       IDH_REQ_GPU_INIT_DATA,
+
+       IDH_LOG_VF_ERROR       = 200,
+};
+
+enum idh_event {
+       IDH_CLR_MSG_BUF = 0,
+       IDH_READY_TO_ACCESS_GPU,
+       IDH_FLR_NOTIFICATION,
+       IDH_FLR_NOTIFICATION_CMPL,
+       IDH_SUCCESS,
+       IDH_FAIL,
+       IDH_QUERY_ALIVE,
+       IDH_REQ_GPU_INIT_DATA_READY,
+
+       IDH_TEXT_MESSAGE = 255,
+};
 
 extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
 
@@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
 int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
 void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
 
-#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
-#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+#define mmMAILBOX_CONTROL 0xE5E
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)
+
+#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56
+#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57
+#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58
+#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59
+
+#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A
+#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B
+#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C
+#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D
+
+#define mmMAILBOX_INT_CNTL 0xE5F
 
 #endif
index f13dc6cc158f959778c71df70e291439782e5958..713ee66a4d3e20482ed8fee612d1fc988f0a8bcd 100644 (file)
@@ -43,7 +43,8 @@ enum idh_event {
        IDH_READY_TO_ACCESS_GPU,
        IDH_FLR_NOTIFICATION,
        IDH_FLR_NOTIFICATION_CMPL,
-       IDH_EVENT_MAX
+
+       IDH_TEXT_MESSAGE = 255
 };
 
 extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
index e08245a446fcecffa350f92fb939a1b5ed76570a..f97857ed3c7e046a6d8d56125178333f4e6fe435 100644 (file)
@@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
 
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+
        adev->irq.ih.enabled = true;
+
+       if (adev->irq.ih1.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+                                          RB_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               adev->irq.ih1.enabled = true;
+       }
+
+       if (adev->irq.ih2.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+                                          RB_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               adev->irq.ih2.enabled = true;
+       }
 }
 
 /**
@@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
 
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+
        /* set rptr, wptr to 0 */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
        adev->irq.ih.enabled = false;
        adev->irq.ih.rptr = 0;
+
+       if (adev->irq.ih1.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+                                          RB_ENABLE, 0);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+               adev->irq.ih1.enabled = false;
+               adev->irq.ih1.rptr = 0;
+       }
+
+       if (adev->irq.ih2.ring_size) {
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+                                          RB_ENABLE, 0);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+               adev->irq.ih2.enabled = false;
+               adev->irq.ih2.rptr = 0;
+       }
+
 }
 
 static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
        return ih_rb_cntl;
 }
 
+static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+       u32 ih_doorbell_rtpr = 0;
+
+       if (ih->use_doorbell) {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR, OFFSET,
+                                                ih->doorbell_index);
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR,
+                                                ENABLE, 1);
+       } else {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+                                                IH_DOORBELL_RPTR,
+                                                ENABLE, 0);
+       }
+       return ih_doorbell_rtpr;
+}
+
+static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+{
+       uint32_t tmp;
+
+       /* Reroute to IH ring 1 for VMC */
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+       /* Reroute IH ring 1 for UMC */
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+       tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+       WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+}
+
 /**
  * navi10_ih_irq_init - init and enable the interrupt ring
  *
@@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
 static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ih_ring *ih = &adev->irq.ih;
-       u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
+       u32 ih_rb_cntl, ih_chicken;
        u32 tmp;
 
        /* disable irqs */
@@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
                                   !!adev->irq.msi_enabled);
+       if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+               if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+                       DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+                       return -ETIMEDOUT;
+               }
+       } else {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+       }
+       navi10_ih_reroute_ih(adev);
 
        if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
                if (ih->use_bus_addr) {
@@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-
        /* set the writeback address whether it's enabled or not */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
                     lower_32_bits(ih->wptr_addr));
@@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
 
-       ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
-       if (ih->use_doorbell) {
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, OFFSET,
-                                                ih->doorbell_index);
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, ENABLE, 1);
-       } else {
-               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
-                                                IH_DOORBELL_RPTR, ENABLE, 0);
-       }
-       WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+       WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+                       navi10_ih_doorbell_rptr(ih));
 
        adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
                                            ih->doorbell_index);
 
+       ih = &adev->irq.ih1;
+       if (ih->ring_size) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+                            (ih->gpu_addr >> 40) & 0xff);
+
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+               ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+                                          WPTR_OVERFLOW_ENABLE, 0);
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+                                          RB_FULL_DRAIN_ENABLE, 1);
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+                               return -ETIMEDOUT;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+               WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+                               navi10_ih_doorbell_rptr(ih));
+       }
+
+       ih = &adev->irq.ih2;
+       if (ih->ring_size) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+                            (ih->gpu_addr >> 40) & 0xff);
+
+               ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+               ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+
+               if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+                       if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+                                               ih_rb_cntl)) {
+                               DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+                               return -ETIMEDOUT;
+                       }
+               } else {
+                       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+               }
+               /* set rptr, wptr to 0 */
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+               WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+                            navi10_ih_doorbell_rptr(ih));
+       }
+
+
        tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
        tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
                            CLIENT18_IS_STORM_CLIENT, 1);
@@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
                goto out;
 
-       reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+       if (ih == &adev->irq.ih)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+       else if (ih == &adev->irq.ih1)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+       else if (ih == &adev->irq.ih2)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+       else
+               BUG();
+
        wptr = RREG32_NO_KIQ(reg);
        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
                goto out;
@@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
                 wptr, ih->rptr, tmp);
        ih->rptr = tmp;
 
-       reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+       if (ih == &adev->irq.ih)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+       else if (ih == &adev->irq.ih1)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+       else if (ih == &adev->irq.ih2)
+               reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+       else
+               BUG();
+
        tmp = RREG32_NO_KIQ(reg);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(reg, tmp);
@@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 
                if (amdgpu_sriov_vf(adev))
                        navi10_ih_irq_rearm(adev, ih);
-       } else
+       } else if (ih == &adev->irq.ih) {
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+       } else if (ih == &adev->irq.ih1) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+       } else if (ih == &adev->irq.ih2) {
+               WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+       }
+}
+
+/**
+ * navi10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int navi10_ih_self_irq(struct amdgpu_device *adev,
+                             struct amdgpu_irq_src *source,
+                             struct amdgpu_iv_entry *entry)
+{
+       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+       switch (entry->ring_id) {
+       case 1:
+               *adev->irq.ih1.wptr_cpu = wptr;
+               schedule_work(&adev->irq.ih1_work);
+               break;
+       case 2:
+               *adev->irq.ih2.wptr_cpu = wptr;
+               schedule_work(&adev->irq.ih2_work);
+               break;
+       default: break;
+       }
+       return 0;
+}
+
+static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
+       .process = navi10_ih_self_irq,
+};
+
+static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->irq.self_irq.num_types = 0;
+       adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
 }
 
 static int navi10_ih_early_init(void *handle)
@@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        navi10_ih_set_interrupt_funcs(adev);
+       navi10_ih_set_self_irq_funcs(adev);
        return 0;
 }
 
@@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool use_bus_addr;
 
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+                               &adev->irq.self_irq);
+
+       if (r)
+               return r;
+
        /* use gpu virtual address for ih ring
         * until ih_checken is programmed to allow
         * use bus address for ih ring by psp bl */
@@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle)
        adev->irq.ih.use_doorbell = true;
        adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+       if (r)
+               return r;
+
+       adev->irq.ih1.use_doorbell = true;
+       adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+       if (r)
+               return r;
+
+       adev->irq.ih2.use_doorbell = true;
+       adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
        r = amdgpu_irq_init(adev);
 
        return r;
@@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+       amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
        amdgpu_ih_ring_fini(adev, &adev->irq.ih);
 
        return 0;
index f3a3fe746222f39ff681212a40a10feb3777facb..cbcf04578b999b968f49efee40a4e44fad4f59e0 100644 (file)
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
        .get_clockgating_state = nbio_v2_3_get_clockgating_state,
        .ih_control = nbio_v2_3_ih_control,
        .init_registers = nbio_v2_3_init_registers,
-       .detect_hw_virt = nbio_v2_3_detect_hw_virt,
        .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 };
index 635d9e1fc0a364db991317bec42e6d6edccc29b1..7b2fb050407d2fe785bfa389a92497e2bdbdbedf 100644 (file)
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
 
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
-       .detect_hw_virt = nbio_v6_1_detect_hw_virt,
 };
index d6cbf26074bca475d915d1ac9f1d6b6c13665130..d34628e113fc389bb1809a0905aad09e12d37d46 100644 (file)
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
 
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
        .get_clockgating_state = nbio_v7_0_get_clockgating_state,
        .ih_control = nbio_v7_0_ih_control,
        .init_registers = nbio_v7_0_init_registers,
-       .detect_hw_virt = nbio_v7_0_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
 };
index 149d386590df2beb6b7f034ec46061e5b6c58be8..e629156173d31b46b6e2bada2f5ed2a19710a06a 100644 (file)
@@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
 
        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
-               ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+               ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
        } else
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
 
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
 };
 
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
 
@@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
                obj->err_data.ce_count += err_data.ce_count;
 
                if (err_data.ce_count)
-                       DRM_INFO("%ld correctable errors detected in %s block\n",
-                               obj->err_data.ce_count, adev->nbio.ras_if->name);
+                       dev_info(adev->dev, "%ld correctable hardware "
+                                       "errors detected in %s block, "
+                                       "no user action is needed.\n",
+                                       obj->err_data.ce_count,
+                                       adev->nbio.ras_if->name);
 
                if (err_data.ue_count)
-                       DRM_INFO("%ld uncorrectable errors detected in %s block\n",
-                               obj->err_data.ue_count, adev->nbio.ras_if->name);
+                       dev_info(adev->dev, "%ld uncorrectable hardware "
+                                       "errors detected in %s block\n",
+                                       obj->err_data.ue_count,
+                                       adev->nbio.ras_if->name);
 
-               DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+               dev_info(adev->dev, "RAS controller interrupt triggered "
+                                       "by NBIF error\n");
 
                /* ras_controller_int is dedicated for nbif ras error,
                 * not the global interrupt for sync flood
@@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
-       .detect_hw_virt = nbio_v7_4_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
        .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
        .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
index 52318b03c424ff570d22cca4b33fd10d0c971b08..6655dd2009b62ef10cfd3dd31fafd019ffb0edc6 100644 (file)
@@ -453,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 {
        int r;
 
-       /* Set IP register base before any HW register access */
-       r = nv_reg_base_init(adev);
-       if (r)
-               return r;
-
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio.funcs->detect_hw_virt(adev);
-
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
                adev->virt.ops = &xgpu_nv_virt_ops;
+               /* try send GPU_INIT_DATA request to host */
+               amdgpu_virt_request_init_data(adev);
+       }
+
+       /* Set IP register base before any HW register access */
+       r = nv_reg_base_init(adev);
+       if (r)
+               return r;
 
        switch (adev->asic_type) {
        case CHIP_NAVI10:
@@ -497,8 +498,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-               if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-                   !amdgpu_sriov_vf(adev))
+               if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -548,13 +548,6 @@ static bool nv_need_full_reset(struct amdgpu_device *adev)
        return true;
 }
 
-static void nv_get_pcie_usage(struct amdgpu_device *adev,
-                             uint64_t *count0,
-                             uint64_t *count1)
-{
-       /*TODO*/
-}
-
 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
 {
 #if 0
@@ -629,7 +622,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
        .invalidate_hdp = &nv_invalidate_hdp,
        .init_doorbell_index = &nv_init_doorbell_index,
        .need_full_reset = &nv_need_full_reset,
-       .get_pcie_usage = &nv_get_pcie_usage,
        .need_reset_on_init = &nv_need_reset_on_init,
        .get_pcie_replay_count = &nv_get_pcie_replay_count,
        .supports_baco = &nv_asic_supports_baco,
index 1de984647dbbfcc790ae762cc35768332d90e849..fd6b58243b03279b52d5f042e39d2727370924e9 100644 (file)
 #define        PACKET3_BLK_CNTX_UPDATE                         0x53
 #define        PACKET3_INCR_UPDT_STATE                         0x55
 #define        PACKET3_ACQUIRE_MEM                             0x58
+/* 1.  HEADER
+ * 2.  COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 2.  COHER_SIZE [31:0]
+ * 3.  COHER_SIZE_HI [7:0]
+ * 4.  COHER_BASE_LO [31:0]
+ * 5.  COHER_BASE_HI [23:0]
+ * 7.  POLL_INTERVAL [15:0]
+ * 8.  GCR_CNTL [18:0]
+ */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0)
+               /*
+                * 0:NOP
+                * 1:ALL
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2)
+               /*
+                * 0:ALL
+                * 1:reserved
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11)
+               /*
+                * 0:ALL
+                * 1:VOL
+                * 2:RANGE
+                * 3:FIRST_LAST
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x)  ((x) << 13)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15)
+#define        PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16)
+               /*
+                * 0: PARALLEL
+                * 1: FORWARD
+                * 2: REVERSE
+                */
+#define        PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA  (1 << 18)
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_INTERRUPT                               0x5A
 #define        PACKET3_GEN_PDEPTE                              0x5B
 #define        PACKET3_GET_LOD_STATS                           0x8E
 #define        PACKET3_DRAW_MULTI_PREAMBLE                     0x8F
 #define        PACKET3_FRAME_CONTROL                           0x90
+#                      define FRAME_TMZ        (1 << 0)
 #                      define FRAME_CMD(x) ((x) << 28)
                        /*
                         * x=0: tmz_begin
index 7539104175e8ae451520e618c4886b67f5d35e37..d7f92634eba271e40e5c1e219dfbb28a563a0bbb 100644 (file)
@@ -50,15 +50,14 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
        const char *chip_name;
        char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *hdr;
        const struct ta_firmware_header_v1_0 *ta_hdr;
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
        case CHIP_RAVEN:
-               if (adev->rev_id >= 0x8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
@@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
                goto out;
 
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
        err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
        if (err) {
@@ -126,8 +113,6 @@ out:
                dev_err(adev->dev,
                        "psp v10.0: Failed to load firmware \"%s\"\n",
                        fw_name);
-               release_firmware(adev->psp.asd_fw);
-               adev->psp.asd_fw = NULL;
        }
 
        return err;
@@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v10_0_sram_map(struct amdgpu_device *adev,
-                  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                  unsigned int *sram_data_reg_offset,
-                  enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
-                                       struct amdgpu_firmware_info *ucode,
-                                       enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (!ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
-
 static int psp_v10_0_mode1_reset(struct psp_context *psp)
 {
        DRM_INFO("psp mode 1 reset not supported now! \n");
@@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = {
        .ring_create = psp_v10_0_ring_create,
        .ring_stop = psp_v10_0_ring_stop,
        .ring_destroy = psp_v10_0_ring_destroy,
-       .compare_sram_data = psp_v10_0_compare_sram_data,
        .mode1_reset = psp_v10_0_mode1_reset,
        .ring_get_wptr = psp_v10_0_ring_get_wptr,
        .ring_set_wptr = psp_v10_0_ring_set_wptr,
index 0afd610a1263faeb276de8ca284eeb4f1308e497..1de89cc3c3559e4f80c360dade988c61933e031d 100644 (file)
@@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        const char *chip_name;
        char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *sos_hdr;
-       const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
-       const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
-       const struct psp_firmware_header_v1_0 *asd_hdr;
        const struct ta_firmware_header_v1_0 *ta_hdr;
 
        DRM_DEBUG("\n");
@@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
-       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+       err = psp_init_sos_microcode(psp, chip_name);
        if (err)
-               goto out;
+               return err;
 
-       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-       amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
-
-       switch (sos_hdr->header.header_version_major) {
-       case 1:
-               adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
-               adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
-               adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
-               adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
-               adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
-                               le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
-               adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                               le32_to_cpu(sos_hdr->sos_offset_bytes);
-               if (sos_hdr->header.header_version_minor == 1) {
-                       sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
-                       adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
-                       adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                       le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
-                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
-                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                       le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
-               }
-               if (sos_hdr->header.header_version_minor == 2) {
-                       sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
-                       adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
-                       adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                                                   le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
-               }
-               break;
-       default:
-               dev_err(adev->dev,
-                       "Unsupported psp sos firmware\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out1;
-
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out1;
-
-       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
-                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+               return err;
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
@@ -194,6 +137,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
+               if (amdgpu_sriov_vf(adev))
+                       break;
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
                err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
                if (err) {
@@ -229,15 +174,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 out2:
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;
-out1:
-       release_firmware(adev->psp.asd_fw);
-       adev->psp.asd_fw = NULL;
-out:
-       dev_err(adev->dev,
-               "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
-       release_firmware(adev->psp.sos_fw);
-       adev->psp.sos_fw = NULL;
-
        return err;
 }
 
@@ -283,11 +219,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
        /* Check tOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
-       if (psp_v11_0_is_sos_alive(psp)) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (psp_v11_0_is_sos_alive(psp))
                return 0;
-       }
 
        ret = psp_v11_0_wait_for_bootloader(psp);
        if (ret)
@@ -319,11 +252,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
        /* Check sOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
-       if (psp_v11_0_is_sos_alive(psp)) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (psp_v11_0_is_sos_alive(psp))
                return 0;
-       }
 
        ret = psp_v11_0_wait_for_bootloader(psp);
        if (ret)
@@ -446,13 +376,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
-               return true;
-       return false;
-}
-
 static int psp_v11_0_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type)
 {
@@ -460,7 +383,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
        struct amdgpu_device *adev = psp->adev;
 
        /* Write the ring destroy command*/
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                                     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
        else
@@ -471,7 +394,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
        mdelay(20);
 
        /* Wait for response flag (bit 31) */
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
                                   0x80000000, 0x80000000, false);
        else
@@ -489,7 +412,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
        struct psp_ring *ring = &psp->km_ring;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                ret = psp_v11_0_ring_stop(psp, ring_type);
                if (ret) {
                        DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
@@ -567,138 +490,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v11_0_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               if (adev->asic_type < CHIP_NAVI10) {
-                       *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-                       *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               } else {
-                       *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
-                       *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
-               }
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               if (adev->asic_type < CHIP_NAVI10) {
-                       *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-                       *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               } else {
-                       *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
-                       *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
-               }
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static int psp_v11_0_mode1_reset(struct psp_context *psp)
 {
        int ret;
@@ -733,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
        return 0;
 }
 
-/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready.
- * For now, return success and hack the hive_id so high level code can
- * start testing
- */
-static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
-       int number_devices, struct psp_xgmi_topology_info *topology)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
-       struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
-       int i;
-       int ret;
-
-       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
-               return -EINVAL;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       /* Fill in the shared memory with topology information as input */
-       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
-       topology_info_input->num_nodes = number_devices;
-
-       for (i = 0; i < topology_info_input->num_nodes; i++) {
-               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
-               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
-               topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
-               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
-       }
-
-       /* Invoke xgmi ta to get the topology information */
-       ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
-       if (ret)
-               return ret;
-
-       /* Read the output topology information from the shared memory */
-       topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
-       topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
-       for (i = 0; i < topology->num_nodes; i++) {
-               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
-               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
-               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
-               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
-       }
-
-       return 0;
-}
-
-static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
-       int number_devices, struct psp_xgmi_topology_info *topology)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
-       int i;
-
-       if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
-               return -EINVAL;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
-       topology_info_input->num_nodes = number_devices;
-
-       for (i = 0; i < topology_info_input->num_nodes; i++) {
-               topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
-               topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
-               topology_info_input->nodes[i].is_sharing_enabled = 1;
-               topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
-       }
-
-       /* Invoke xgmi ta to set topology information */
-       return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
-}
-
-static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       int ret;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
-
-       /* Invoke xgmi ta to get hive id */
-       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
-       if (ret)
-               return ret;
-
-       *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
-
-       return 0;
-}
-
-static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
-{
-       struct ta_xgmi_shared_memory *xgmi_cmd;
-       int ret;
-
-       xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
-       memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
-       xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
-
-       /* Invoke xgmi ta to get the node id */
-       ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
-       if (ret)
-               return ret;
-
-       *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
-
-       return 0;
-}
-
-static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
-               struct ta_ras_trigger_error_input *info)
-{
-       struct ta_ras_shared_memory *ras_cmd;
-       int ret;
-
-       if (!psp->ras.ras_initialized)
-               return -EINVAL;
-
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
-       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-       ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
-       ras_cmd->ras_in_message.trigger_error = *info;
-
-       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
-       if (ret)
-               return -EINVAL;
-
-       /* If err_event_athub occurs error inject was successful, however
-          return status from TA is no long reliable */
-       if (amdgpu_ras_intr_triggered())
-               return 0;
-
-       return ras_cmd->ras_status;
-}
-
-static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
-{
-#if 0
-       // not support yet.
-       struct ta_ras_shared_memory *ras_cmd;
-       int ret;
-
-       if (!psp->ras.ras_initialized)
-               return -EINVAL;
-
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
-       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-       ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
-       ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
-
-       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
-       if (ret)
-               return -EINVAL;
-
-       return ras_cmd->ras_status;
-#else
-       return -EINVAL;
-#endif
-}
-
-static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
-{
-       return psp_rlc_autoload_start(psp);
-}
-
 static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
 {
        int ret;
@@ -1099,7 +715,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -1111,7 +727,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v11_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
        } else
@@ -1203,16 +819,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
        .ring_create = psp_v11_0_ring_create,
        .ring_stop = psp_v11_0_ring_stop,
        .ring_destroy = psp_v11_0_ring_destroy,
-       .compare_sram_data = psp_v11_0_compare_sram_data,
        .mode1_reset = psp_v11_0_mode1_reset,
-       .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
-       .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
-       .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
-       .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
-       .support_vmr_ring = psp_v11_0_support_vmr_ring,
-       .ras_trigger_error = psp_v11_0_ras_trigger_error,
-       .ras_cure_posion = psp_v11_0_ras_cure_posion,
-       .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
        .mem_training_init = psp_v11_0_memory_training_init,
        .mem_training_fini = psp_v11_0_memory_training_fini,
        .mem_training = psp_v11_0_memory_training,
index 58d8b6d732e8f0b6e26fc52738dad013813c03e7..6c9614f77d33e5ffae5004d6e20bb8cc24a36464 100644 (file)
@@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
        const char *chip_name;
-       char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *asd_hdr;
-
-       DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
        case CHIP_RENOIR:
@@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
                BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out1;
-
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
-       if (err)
-               goto out1;
-
-       asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
-                               le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
-
-       return 0;
-
-out1:
-       release_firmware(adev->psp.asd_fw);
-       adev->psp.asd_fw = NULL;
-
+       err = psp_init_asd_microcode(psp, chip_name);
        return err;
 }
 
@@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
         * are already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-       if (sol_reg) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (sol_reg)
                return 0;
-       }
 
        /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp,
        return 0;
 }
 
-static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
-               return true;
-       return false;
-}
-
 static int psp_v12_0_ring_create(struct psp_context *psp,
                                enum psp_ring_type ring_type)
 {
@@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
        struct psp_ring *ring = &psp->km_ring;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(psp->adev)) {
                /* Write low address of the ring to C2PMSG_102 */
                psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
        struct amdgpu_device *adev = psp->adev;
 
        /* Write the ring destroy command*/
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                                     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
        else
@@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
        mdelay(20);
 
        /* Wait for response flag (bit 31) */
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
                                   0x80000000, 0x80000000, false);
        else
@@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v12_0_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static int psp_v12_0_mode1_reset(struct psp_context *psp)
 {
        int ret;
@@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v12_0_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
        } else
@@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
        .ring_create = psp_v12_0_ring_create,
        .ring_stop = psp_v12_0_ring_stop,
        .ring_destroy = psp_v12_0_ring_destroy,
-       .compare_sram_data = psp_v12_0_compare_sram_data,
        .mode1_reset = psp_v12_0_mode1_reset,
        .ring_get_wptr = psp_v12_0_ring_get_wptr,
        .ring_set_wptr = psp_v12_0_ring_set_wptr,
index 735c43c7daab921e283f8bcf7aedcde5c8cae386..f2e725f72d2f1644e1a786084df6782c4fe9aad3 100644 (file)
@@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
 
 #define smnMP1_FIRMWARE_FLAGS 0x3010028
 
-static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
 static int psp_v3_1_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type);
 
@@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
        const char *chip_name;
-       char fw_name[30];
        int err = 0;
-       const struct psp_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
-       err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-
-       err = amdgpu_ucode_validate(adev->psp.sos_fw);
+       err = psp_init_sos_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-       adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
-                                       le32_to_cpu(hdr->sos_size_bytes);
-       adev->psp.sys_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-       adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-                               le32_to_cpu(hdr->sos_offset_bytes);
-
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
-       err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
-       if (err)
-               goto out;
+               return err;
 
-       err = amdgpu_ucode_validate(adev->psp.asd_fw);
+       err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
-
-       hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)hdr +
-                               le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+               return err;
 
        return 0;
-out:
-       if (err) {
-               dev_err(adev->dev,
-                       "psp v3.1: Failed to load firmware \"%s\"\n",
-                       fw_name);
-               release_firmware(adev->psp.sos_fw);
-               adev->psp.sos_fw = NULL;
-               release_firmware(adev->psp.asd_fw);
-               adev->psp.asd_fw = NULL;
-       }
-
-       return err;
 }
 
 static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
@@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
        return ret;
 }
 
-static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
-{
-       int i;
-
-       if (ver == adev->psp.sos_fw_version)
-               return true;
-
-       /*
-        * Double check if the latest four legacy versions.
-        * If yes, it is still the right version.
-        */
-       for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
-               if (sos_old_versions[i] == adev->psp.sos_fw_version)
-                       return true;
-       }
-
-       return false;
-}
-
 static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 {
        int ret;
        unsigned int psp_gfxdrv_command_reg = 0;
        struct amdgpu_device *adev = psp->adev;
-       uint32_t sol_reg, ver;
+       uint32_t sol_reg;
 
        /* Check sOS sign of life register to confirm sys driver and sOS
         * are already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-       if (sol_reg) {
-               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-               printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+       if (sol_reg)
                return 0;
-       }
 
        /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
                           RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
                           0, true);
-
-       ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
-       if (!psp_v3_1_match_version(adev, ver))
-               DRM_WARN("SOS version doesn't match\n");
-
        return ret;
 }
 
@@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
 
        psp_v3_1_reroute_ih(psp);
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                ret = psp_v3_1_ring_stop(psp, ring_type);
                if (ret) {
                        DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
                              enum psp_ring_type ring_type)
 {
        int ret = 0;
-       unsigned int psp_ring_reg = 0;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
-               /* Write the Destroy GPCOM ring command to C2PMSG_101 */
-               psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
-               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
-
-               /* there might be handshake issue which needs delay */
-               mdelay(20);
-
-               /* Wait for response flag (bit 31) in C2PMSG_101 */
-               ret = psp_wait_for(psp,
-                               SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
-                               0x80000000, 0x80000000, false);
-       } else {
-               /* Write the ring destroy command to C2PMSG_64 */
-               psp_ring_reg = 3 << 16;
-               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+       /* Write the ring destroy command*/
+       if (amdgpu_sriov_vf(adev))
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+                                    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+       else
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+                                    GFX_CTRL_CMD_ID_DESTROY_RINGS);
 
-               /* there might be handshake issue which needs delay */
-               mdelay(20);
+       /* there might be handshake issue with hardware which needs delay */
+       mdelay(20);
 
-               /* Wait for response flag (bit 31) in C2PMSG_64 */
-               ret = psp_wait_for(psp,
-                               SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-                               0x80000000, 0x80000000, false);
-       }
+       /* Wait for response flag (bit 31) */
+       if (amdgpu_sriov_vf(adev))
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+                                  0x80000000, 0x80000000, false);
+       else
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+                                  0x80000000, 0x80000000, false);
 
        return ret;
 }
@@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
        return ret;
 }
 
-static int
-psp_v3_1_sram_map(struct amdgpu_device *adev,
-                 unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
-{
-       int ret = 0;
-
-       switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SMC:
-               *sram_offset = 0;
-               *sram_addr_reg_offset = 0;
-               *sram_data_reg_offset = 0;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_CP_CE:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_PFP:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_ME:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC1:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_CP_MEC2:
-               *sram_offset = 0x10000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_RLC_G:
-               *sram_offset = 0x2000;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
-               break;
-
-       case AMDGPU_UCODE_ID_SDMA0:
-               *sram_offset = 0x0;
-               *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
-               *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
-               break;
-
-/* TODO: needs to confirm */
-#if 0
-       case AMDGPU_UCODE_ID_SDMA1:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_UVD:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-
-       case AMDGPU_UCODE_ID_VCE:
-               *sram_offset = ;
-               *sram_addr_reg_offset = ;
-               break;
-#endif
-
-       case AMDGPU_UCODE_ID_MAXIMUM:
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
-                                      struct amdgpu_firmware_info *ucode,
-                                      enum AMDGPU_UCODE_ID ucode_type)
-{
-       int err = 0;
-       unsigned int fw_sram_reg_val = 0;
-       unsigned int fw_sram_addr_reg_offset = 0;
-       unsigned int fw_sram_data_reg_offset = 0;
-       unsigned int ucode_size;
-       uint32_t *ucode_mem = NULL;
-       struct amdgpu_device *adev = psp->adev;
-
-       err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
-                               &fw_sram_data_reg_offset, ucode_type);
-       if (err)
-               return false;
-
-       WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
-       ucode_size = ucode->ucode_size;
-       ucode_mem = (uint32_t *)ucode->kaddr;
-       while (ucode_size) {
-               fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
-               if (*ucode_mem != fw_sram_reg_val)
-                       return false;
-
-               ucode_mem++;
-               /* 4 bytes */
-               ucode_size -= 4;
-       }
-
-       return true;
-}
-
 static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
@@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
        return 0;
 }
 
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
-{
-       if (amdgpu_sriov_vf(psp->adev))
-               return true;
-
-       return false;
-}
-
 static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
 {
        uint32_t data;
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp))
+       if (amdgpu_sriov_vf(adev))
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
 {
        struct amdgpu_device *adev = psp->adev;
 
-       if (psp_v3_1_support_vmr_ring(psp)) {
+       if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
@@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = {
        .ring_create = psp_v3_1_ring_create,
        .ring_stop = psp_v3_1_ring_stop,
        .ring_destroy = psp_v3_1_ring_destroy,
-       .compare_sram_data = psp_v3_1_compare_sram_data,
        .smu_reload_quirk = psp_v3_1_smu_reload_quirk,
        .mode1_reset = psp_v3_1_mode1_reset,
-       .support_vmr_ring = psp_v3_1_support_vmr_ring,
        .ring_get_wptr = psp_v3_1_ring_get_wptr,
        .ring_set_wptr = psp_v3_1_ring_set_wptr,
 };
index 7d509a40076fa021f3724a4258fa779afe005a64..5f304d61999ebf9c3257ebfcbd48e46a4d26b2a5 100644 (file)
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1200,7 +1200,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
index b6109a99fc43c4e35d27de930b93756d5c612d8e..c59f6f6f4c0917c161e3214ca8428f4440bfd95b 100644 (file)
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1638,7 +1638,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
index 5f3a5ee2a3f4e20948678c36e733e8d8875d8752..33501c6c71895e8ee33e8282bfe2cc2555050ec2 100644 (file)
@@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
-       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@ -472,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_1,
                                                ARRAY_SIZE(golden_settings_sdma_4_1));
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv2,
                                                        ARRAY_SIZE(golden_settings_sdma_rv2));
@@ -561,9 +575,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "vega20";
                break;
        case CHIP_RAVEN:
-               if (adev->rev_id >= 8)
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
@@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-               sdma[i]->sched.ready = false;
        }
 }
 
@@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-               sdma[i]->sched.ready = false;
        }
 }
 
@@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
 
-               DRM_INFO("use_doorbell being set to: [%s]\n",
+               DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                                ring->use_doorbell?"true":"false");
 
                /* doorbell size is 2 dwords, get DWORD offset */
@@ -1848,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
 
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
@@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
                        sprintf(ring->name, "page%d", i);
                        r = amdgpu_ring_init(adev, ring, 1024,
                                             &adev->sdma.trap_irq,
-                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -2445,10 +2458,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+               SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
index d2840c2f62865c23229d599fecfa530be2be7ce7..b544baf306f27b3a52920381bd1301afe56ba318 100644 (file)
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
                                                (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
                break;
        case CHIP_NAVI12:
-               soc15_program_register_sequence(adev,
-                                               golden_settings_sdma_5,
-                                               (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+               if (amdgpu_sriov_vf(adev))
+                       soc15_program_register_sequence(adev,
+                                                       golden_settings_sdma_5_sriov,
+                                                       (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+               else
+                       soc15_program_register_sequence(adev,
+                                                       golden_settings_sdma_5,
+                                                       (const u32)ARRAY_SIZE(golden_settings_sdma_5));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_nv12,
                                                (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -514,9 +542,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
        }
-
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
@@ -541,7 +566,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-       u32 f32_cntl, phase_quantum = 0;
+       u32 f32_cntl = 0, phase_quantum = 0;
        int i;
 
        if (amdgpu_sdma_phase_quantum) {
@@ -569,9 +594,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
        }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-                               AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+               if (!amdgpu_sriov_vf(adev)) {
+                       f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+               }
+
                if (enable && amdgpu_sdma_phase_quantum) {
                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
                               phase_quantum);
@@ -580,7 +608,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
                        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
                               phase_quantum);
                }
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+               if (!amdgpu_sriov_vf(adev))
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
        }
 
 }
@@ -603,6 +632,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
                sdma_v5_0_rlc_stop(adev);
        }
 
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -635,7 +667,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+               if (!amdgpu_sriov_vf(adev))
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -711,26 +744,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                /* set minor_ptr_update to 0 after wptr programed */
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
-               /* set utc l1 enable flag always to 1 */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
-               /* enable MCBP */
-               temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
-               /* Set up RESP_MODE to non-copy addresses */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
-               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
-               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
-               /* program default cache read and write policy */
-               temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
-               /* clean read policy and write policy bits */
-               temp &= 0xFF0FFF;
-               temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+               if (!amdgpu_sriov_vf(adev)) {
+                       /* set utc l1 enable flag always to 1 */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+                       temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+                       /* enable MCBP */
+                       temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+                       /* Set up RESP_MODE to non-copy addresses */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+                       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+                       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+                       /* program default cache read and write policy */
+                       temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+                       /* clean read policy and write policy bits */
+                       temp &= 0xFF0FFF;
+                       temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+               }
 
                if (!amdgpu_sriov_vf(adev)) {
                        /* unhalt engine */
@@ -960,7 +995,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
@@ -1236,7 +1272,7 @@ static int sdma_v5_0_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
 
-               DRM_INFO("use_doorbell being set to: [%s]\n",
+               DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                                ring->use_doorbell?"true":"false");
 
                ring->doorbell_index = (i == 0) ?
@@ -1248,7 +1284,8 @@ static int sdma_v5_0_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -1399,14 +1436,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
 {
        u32 sdma_cntl;
 
-       u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
-               sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-               sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+       if (!amdgpu_sriov_vf(adev)) {
+               u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+                       sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+                       sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
 
-       sdma_cntl = RREG32(reg_offset);
-       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
-                      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-       WREG32(reg_offset, sdma_cntl);
+               sdma_cntl = RREG32(reg_offset);
+               sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+                                         state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+               WREG32(reg_offset, sdma_cntl);
+       }
 
        return 0;
 }
@@ -1667,10 +1706,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
 static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+               SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
index 4d415bfdb42ff2ac62e4431d90a64a9ee3692b2d..153db3f763bc15d4a012b413e835208b01af11b5 100644 (file)
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
        return 0;
 }
 
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-       si_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_VERDE:
        case CHIP_TAHITI:
index 42d5601b6bf35233f7fbea9cad6181da0ce6198f..7d2bbcbe547b2ad53c5b091c2bf09e055dfdf2e1 100644 (file)
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-               ring->sched.ready = false;
        }
 }
 
@@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
-                                    AMDGPU_SDMA_IRQ_INSTANCE1);
+                                    AMDGPU_SDMA_IRQ_INSTANCE1,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -775,7 +776,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
 static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
 {
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, byte_count);
index 0860e85a2d358be2707a69f37207f3d08f27a6ef..c00ba4b23c9a6fec651eb748f52947d5b63fe4ac 100644 (file)
@@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti =
        false
 };
 
-#if 0
-static const struct si_dte_data dte_data_tahiti_le =
-{
-       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
-       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
-       0x5,
-       0xAFC8,
-       0x64,
-       0x32,
-       1,
-       0,
-       0x10,
-       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
-       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
-       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
-       85,
-       true
-};
-#endif
-
 static const struct si_dte_data dte_data_tahiti_pro =
 {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
index d42a8d8a0dea9309723ab07b5afe2260614a61b3..c7c9e07962b96ee312a49de04dd96ccd363de3a2 100644 (file)
@@ -564,7 +564,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
        /* original raven doesn't have full asic reset */
-       if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+       if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+           !(adev->apu_flags & AMD_APU_IS_RAVEN2))
                return 0;
 
        switch (soc15_asic_reset_method(adev)) {
@@ -708,7 +709,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                adev->df.funcs = &df_v1_7_funcs;
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
@@ -1130,16 +1130,23 @@ static int soc15_common_early_init(void *handle)
                break;
        case CHIP_RAVEN:
                adev->asic_funcs = &soc15_asic_funcs;
+               if (adev->pdev->device == 0x15dd)
+                       adev->apu_flags |= AMD_APU_IS_RAVEN;
+               if (adev->pdev->device == 0x15d8)
+                       adev->apu_flags |= AMD_APU_IS_PICASSO;
                if (adev->rev_id >= 0x8)
+                       adev->apu_flags |= AMD_APU_IS_RAVEN2;
+
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        adev->external_rev_id = adev->rev_id + 0x79;
-               else if (adev->pdev->device == 0x15d8)
+               else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        adev->external_rev_id = adev->rev_id + 0x41;
                else if (adev->rev_id == 1)
                        adev->external_rev_id = adev->rev_id + 0x20;
                else
                        adev->external_rev_id = adev->rev_id + 0x01;
 
-               if (adev->rev_id >= 0x8) {
+               if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1157,7 +1164,7 @@ static int soc15_common_early_init(void *handle)
                                AMD_CG_SUPPORT_VCN_MGCG;
 
                        adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
-               } else if (adev->pdev->device == 0x15d8) {
+               } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1218,11 +1225,12 @@ static int soc15_common_early_init(void *handle)
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
-               adev->pg_flags = 0;
+               adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 0x32;
                break;
        case CHIP_RENOIR:
                adev->asic_funcs = &soc15_asic_funcs;
+               adev->apu_flags |= AMD_APU_IS_RENOIR;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                 AMD_CG_SUPPORT_GFX_MGLS |
                                 AMD_CG_SUPPORT_GFX_3D_CGCG |
index c893c645a4b2d5a22159e17c4e302690c3008e6c..56d02aa690a70149427a96138256af36eacbd1b8 100644 (file)
@@ -35,6 +35,9 @@
 #define RREG32_SOC15(ip, inst, reg) \
        RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
+#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
+       RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
 #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
        RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
 
index edfe50821cd9bf5e042fc001eb7ca204660f9aed..799925d22fc81cfda76722859e044abef24388a7 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
+/* 1.  HEADER
+ * 2.  COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3.  COHER_SIZE [31:0]
+ * 4.  COHER_SIZE_HI [7:0]
+ * 5.  COHER_BASE_LO [31:0]
+ * 6.  COHER_BASE_HI [23:0]
+ * 7.  POLL_INTERVAL [15:0]
+ */
+/* COHER_CNTL fields for CP_COHER_CNTL */
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
+#define        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
 #define        PACKET3_WAIT_ON_DE_COUNTER_DIFF                 0x88
 #define        PACKET3_SWITCH_BUFFER                           0x8B
 #define PACKET3_FRAME_CONTROL                          0x90
+#                      define FRAME_TMZ        (1 << 0)
 #                      define FRAME_CMD(x) ((x) << 28)
                        /*
                         * x=0: tmz_begin
index ca7d05993ca2f5961afcbecd528bb0f54ba08979..745ed0fba1ed9e56acfe70ea66b3f258ef8eb2f0 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _TA_RAS_IF_H
 #define _TA_RAS_IF_H
 
+#define RAS_TA_HOST_IF_VER     0
+
 /* Responses have bit 31 set */
 #define RSP_ID_MASK (1U << 31)
 #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@@ -36,18 +38,24 @@ enum ras_command {
        TA_RAS_COMMAND__TRIGGER_ERROR,
 };
 
-enum ta_ras_status {
-       TA_RAS_STATUS__SUCCESS                          = 0x00,
-       TA_RAS_STATUS__RESET_NEEDED                     = 0x01,
-       TA_RAS_STATUS__ERROR_INVALID_PARAMETER          = 0x02,
-       TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE          = 0x03,
-       TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD          = 0x04,
-       TA_RAS_STATUS__ERROR_INJECTION_FAILED           = 0x05,
-       TA_RAS_STATUS__ERROR_ASD_READ_WRITE             = 0x06,
-       TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE           = 0x07,
-       TA_RAS_STATUS__ERROR_TIMEOUT                    = 0x08,
-       TA_RAS_STATUS__ERROR_BLOCK_DISABLED             = 0x09,
-       TA_RAS_STATUS__ERROR_GENERIC                    = 0x10,
+enum ta_ras_status
+{
+       TA_RAS_STATUS__SUCCESS                          = 0x00,
+       TA_RAS_STATUS__RESET_NEEDED                     = 0xA001,
+       TA_RAS_STATUS__ERROR_INVALID_PARAMETER          = 0xA002,
+       TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE          = 0xA003,
+       TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD          = 0xA004,
+       TA_RAS_STATUS__ERROR_INJECTION_FAILED           = 0xA005,
+       TA_RAS_STATUS__ERROR_ASD_READ_WRITE             = 0xA006,
+       TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE           = 0xA007,
+       TA_RAS_STATUS__ERROR_TIMEOUT                    = 0xA008,
+       TA_RAS_STATUS__ERROR_BLOCK_DISABLED             = 0XA009,
+       TA_RAS_STATUS__ERROR_GENERIC                    = 0xA00A,
+       TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT             = 0xA00B,
+       TA_RAS_STATUS__ERROR_GET_DEV_INFO               = 0xA00C,
+       TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV            = 0xA00D,
+       TA_RAS_STATUS__ERROR_NOT_INITIALIZED            = 0xA00E,
+       TA_RAS_STATUS__ERROR_TEE_INTERNAL               = 0xA00F
 };
 
 enum ta_ras_block {
@@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
        uint64_t                value;                  // method if error injection. i.e persistent, coherent etc.
 };
 
+struct ta_ras_output_flags
+{
+       uint8_t    ras_init_success_flag;
+       uint8_t    err_inject_switch_disable_flag;
+       uint8_t    reg_access_failure_flag;
+};
+
 /* Common input structure for RAS callbacks */
 /**********************************************************/
 union ta_ras_cmd_input {
        struct ta_ras_enable_features_input     enable_features;
        struct ta_ras_disable_features_input    disable_features;
        struct ta_ras_trigger_error_input       trigger_error;
+
+       uint32_t        reserve_pad[256];
+};
+
+union ta_ras_cmd_output
+{
+       struct ta_ras_output_flags  flags;
+
+       uint32_t        reserve_pad[256];
 };
 
 /* Shared Memory structures */
 /**********************************************************/
 struct ta_ras_shared_memory {
-       uint32_t                cmd_id;
-       uint32_t                resp_id;
-       enum ta_ras_status      ras_status;
-       uint32_t                reserved;
-       union ta_ras_cmd_input  ras_in_message;
+       uint32_t                    cmd_id;
+       uint32_t                    resp_id;
+       uint32_t                    ras_status;
+       uint32_t                    if_version;
+       union ta_ras_cmd_input      ras_in_message;
+       union ta_ras_cmd_output     ras_out_message;
 };
 
 #endif // TL_RAS_IF_H_
index 14d346321a5f7bfe78c0b4a424cc3113e23e11cb..418cf097c918a58a066d986bdca6d5b82519d3a1 100644 (file)
@@ -56,24 +56,43 @@ const uint32_t
 
 static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
 {
-       WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+                       mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+       rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+                       RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN, 1);
+
+       WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 {
-       WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+                       mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+       rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+                       RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN, 0);
+
+       WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
 }
 
 static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
 {
-       uint32_t rsmu_umc_index;
+       uint32_t rsmu_umc_addr, rsmu_umc_val;
 
-       rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+       rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
                        mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+       rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
 
-       return REG_GET_FIELD(rsmu_umc_index,
+       return REG_GET_FIELD(rsmu_umc_val,
                        RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN);
 }
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
        return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
 }
 
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+                                       uint32_t umc_reg_offset)
+{
+       uint32_t ecc_err_cnt_addr;
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+       if (adev->asic_type == CHIP_ARCTURUS) {
+               /* UMC 6_1_2 registers */
+               ecc_err_cnt_sel_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCntSel_ARCT);
+               ecc_err_cnt_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCnt_ARCT);
+       } else {
+               /* UMC 6_1_1 registers */
+               ecc_err_cnt_sel_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCntSel);
+               ecc_err_cnt_addr =
+                       SOC15_REG_OFFSET(UMC, 0,
+                                       mmUMCCH0_0_EccErrCnt);
+       }
+
+       /* select the lower chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear lower chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_1_CE_CNT_INIT);
+
+       /* select the higher chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear higher chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+       uint32_t rsmu_umc_index_state =
+                               umc_v6_1_get_umc_index_mode_state(adev);
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_disable_umc_index_mode(adev);
+
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_6_reg_offset(adev,
+                                               umc_inst,
+                                               ch_inst);
+
+               umc_v6_1_clear_error_count_per_channel(adev,
+                                               umc_reg_offset);
+       }
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_enable_umc_index_mode(adev);
+}
+
 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
                                                   uint32_t umc_reg_offset,
                                                   unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
        ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                                        EccErrCntCsSel, 0);
        WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
        ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
        *error_count +=
                (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
                 UMC_V6_1_CE_CNT_INIT);
-       /* clear the lower chip err count */
-       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
        /* select the higher chip and check the err counter */
        ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
                                        EccErrCntCsSel, 1);
        WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
        ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
        *error_count +=
                (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
                 UMC_V6_1_CE_CNT_INIT);
-       /* clear the higher chip err count */
-       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
        /* check for SRAM correctable error
          MCUMC_STATUS is a 64 bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 
        if (rsmu_umc_index_state)
                umc_v6_1_enable_umc_index_mode(adev);
+
+       umc_v6_1_clear_error_count(adev);
 }
 
 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
index 82abd8e728ab6c4794428ccd01720b7d5d7f0091..3cafba7265876fa30b2b17ac897e0996d21dcb84 100644 (file)
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -210,13 +211,10 @@ done:
 static int uvd_v4_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index 0fa8aae2d78ebabffd9cdf52b0396dcbb153ae42..a566ff926e90dbfa0caf4de0139036c2fc308fde 100644 (file)
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -208,13 +209,10 @@ done:
 static int uvd_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index e0aadcaf6c8b3cb5629ae5605de26d2fe4a53074..0a880bc101b8470fc3116f254082229fdc333d61 100644 (file)
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
 
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
                        sprintf(ring->name, "uvd_enc%d", i);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst->irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -535,13 +540,10 @@ done:
 static int uvd_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index 0995378d8263c495799068bee36b95c00874fe1c..7a55457e6f9e091a418ad76213ead235f516ef02 100644 (file)
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
                if (!amdgpu_sriov_vf(adev)) {
                        ring = &adev->uvd.inst[j].ring;
                        sprintf(ring->name, "uvd_%d", ring->me);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
                                else
                                        ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
                        }
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->uvd.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
@@ -598,7 +604,6 @@ done:
 static int uvd_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int i;
 
        if (!amdgpu_sriov_vf(adev))
                uvd_v7_0_stop(adev);
@@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle)
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
        }
 
-       for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
-               if (adev->uvd.harvest_config & (1 << i))
-                       continue;
-               adev->uvd.inst[i].ring.sched.ready = false;
-       }
-
        return 0;
 }
 
@@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+       bool enable = (state == AMD_CG_STATE_GATE);
 
        uvd_v7_0_set_bypass_mode(adev, enable);
 
index b6837fcfdba7b97e81c934472238ebb51c2bb3b1..0e2945baf0f157430452faccb5a184d7d7505b79 100644 (file)
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512,
-                                    &adev->vce.irq, 0);
+                                    &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
index 217db187207c766c08a949d4f006c11d70c4cad6..6d9108fa22e0f21e08ffee13b2f78f1de1f069ec 100644 (file)
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
index 3fd102efb7afe63df7c2ee018ed2c328a856e27e..a0fb119240f40d0b6a7742626d1858cce7505d06 100644 (file)
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
                        else
                                ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
                }
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle)
 static int vce_v4_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int i;
 
        if (!amdgpu_sriov_vf(adev)) {
                /* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle)
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
        }
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].sched.ready = false;
-
        return 0;
 }
 
index 09b0572b838d29d6a758a570555722b03cbba341..1ad79155ed00fbafe00d6d2b80306b6d14be0299 100644 (file)
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
 
        ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -227,14 +229,11 @@ done:
 static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                RREG32_SOC15(VCN, 0, mmUVD_STATUS))
                vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index ec8091a661df140e8ffcc6d24d1db85225cc4232..90ed773695eaffd5f9c60b7cf781c2316a11e9c0 100644 (file)
@@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
        struct amdgpu_ring *ring;
        int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared;
 
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                            AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;
 
@@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
                else
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }
@@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle)
        if (r)
                return r;
 
+       fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+       fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
        return 0;
 }
 
@@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle)
 {
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+
+       fw_shared->present_flag_0 = 0;
 
        amdgpu_virt_free_mm_table(adev);
 
@@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle)
        if (r)
                goto done;
 
+       //Disable vcn decode for sriov
+       if (amdgpu_sriov_vf(adev))
+               ring->sched.ready = false;
+
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.inst->ring_enc[i];
                r = amdgpu_ring_test_helper(ring);
@@ -248,21 +260,12 @@ done:
 static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
-       int i;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
                vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
-       for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.inst->ring_enc[i];
-               ring->sched.ready = false;
-       }
-
        return 0;
 }
 
@@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
+       /* non-cache window */
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+               lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+               upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+
        WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 }
 
@@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 
        /* non-cache window */
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+               lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+               upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
@@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
 
@@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
+       /* Stall DPG before WPTR/RPTR reset */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+               UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+               ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+
        /* set the write pointer delay */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
@@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));
 
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+       /* Unstall DPG */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+               0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
        return 0;
 }
 
 static int vcn_v2_0_start(struct amdgpu_device *adev)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
@@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
        /* programm the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
@@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+       fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+       fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+       fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+       fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
        return 0;
 }
@@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
                                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
                        if (!ret_code) {
+                               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
                                /* pause DPG */
                                reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
                                WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
+                               /* Stall DPG before WPTR/RPTR reset */
+                               WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+                                          UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+                                          ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
                                /* Restore */
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[0];
+                               ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[1];
+                               ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                               fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                           RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+                               fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+                               /* Unstall DPG */
+                               WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+                                          0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
                                           UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
@@ -1796,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
        uint32_t table_size = 0;
        struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
        struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
-       struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
        struct mmsch_v2_0_cmd_end end = { {0} };
        struct mmsch_v2_0_init_header *header;
        uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@@ -1806,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
        direct_rd_mod_wt.cmd_header.command_type =
                MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
-       direct_poll.cmd_header.command_type =
-               MMSCH_COMMAND__DIRECT_REG_POLLING;
        end.cmd_header.command_type = MMSCH_COMMAND__END;
 
        if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
index c6363f5ad564026bc1f9f2d844a6bdd07cdc4630..3c6eafb62ee68e5c22ecc33e9c5de90f1199f9b0 100644 (file)
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
 
                        adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
                        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-                               harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+                               harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
                                if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
                                        adev->vcn.harvest_config |= 1 << i;
                        }
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
                return r;
 
        for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+               volatile struct amdgpu_fw_shared *fw_shared;
+
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
                adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
                adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+               adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
                adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+               adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
                adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+               adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
                adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+               adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
                adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-               adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+               adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
 
                ring = &adev->vcn.inst[j].ring_dec;
                ring->use_doorbell = true;
@@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
                                (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
                sprintf(ring->name, "vcn_dec_%d", j);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+                                    0, AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
 
@@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle)
                                        (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
 
                        sprintf(ring->name, "vcn_enc_%d.%d", j, i);
-                       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+                       r = amdgpu_ring_init(adev, ring, 512,
+                                            &adev->vcn.inst[j].irq, 0,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
+
+               fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+               fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
        }
 
        if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle)
  */
 static int vcn_v2_5_sw_fini(void *handle)
 {
-       int r;
+       int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       volatile struct amdgpu_fw_shared *fw_shared;
+
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+               if (adev->vcn.harvest_config & (1 << i))
+                       continue;
+               fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+               fw_shared->present_flag_0 = 0;
+       }
 
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_free_mm_table(adev);
@@ -308,25 +324,16 @@ done:
 static int vcn_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
-       int i, j;
+       int i;
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
-               ring = &adev->vcn.inst[i].ring_dec;
 
                if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
                     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
                        vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-               ring->sched.ready = false;
-
-               for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-                       ring = &adev->vcn.inst[i].ring_enc[j];
-                       ring->sched.ready = false;
-               }
        }
 
        return 0;
@@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
                        continue;
                /* cache window 0: fw */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
-                       WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+                       WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
                        offset = 0;
                } else {
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                lower_32_bits(adev->vcn.inst[i].gpu_addr));
-                       WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                upper_32_bits(adev->vcn.inst[i].gpu_addr));
                        offset = size;
-                       WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+                       WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
                                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
                }
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
 
                /* cache window 1: stack */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
                /* cache window 2: context */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
-               WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+               /* non-cache window */
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+                       lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+                       upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+               WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+                       AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
        }
 }
 
@@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                if (!indirect) {
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+                               VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                } else {
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+                               VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                               UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+                               VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                }
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
        }
 
        if (!indirect)
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
        else
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
 
        /* cache window 1: stack */
        if (!indirect) {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+                       VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                       UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+                       VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        }
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
 
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+               VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+               VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
 
        /* non-cache window */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+               lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+               upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+               VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+               AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+               VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
                 UVD_CGC_CTRL__VCPU_MODE_MASK |
                 UVD_CGC_CTRL__MMSCH_MODE_MASK);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+               VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
 
        /* turn off clock gating */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+               VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
 
        /* turn on SUVD clock gating */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+               VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
 
        /* turn on sw mode in UVD_SUVD_CGC_CTRL */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+               VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
 }
 
 /**
@@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
+       volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
        struct amdgpu_ring *ring;
        uint32_t rb_bufsz, tmp;
 
        /* disable register anti-hang mechanism */
-       WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
                ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        /* enable dynamic power gating mode */
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
        tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
        tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
-       WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
 
        if (indirect)
                adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
        /* disable master interupt */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+               VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
 
        /* setup mmUVD_LMI_CTRL */
        tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
                (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                0x00100000L);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+               VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_CNTL),
+               VCN, 0, mmUVD_MPC_CNTL),
                0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUXA0),
+               VCN, 0, mmUVD_MPC_SET_MUXA0),
                ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUXB0),
+               VCN, 0, mmUVD_MPC_SET_MUXB0),
                ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MPC_SET_MUX),
+               VCN, 0, mmUVD_MPC_SET_MUX),
                ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
 
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+               VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+               VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
 
        /* enable LMI MC and UMC channels */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+               VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
 
        /* unblock VCPU register access */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+               VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
 
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+               VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
        /* enable master interrupt */
        WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-               UVD, 0, mmUVD_MASTINT_EN),
+               VCN, 0, mmUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
        if (indirect)
@@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+       /* Stall DPG before WPTR/RPTR reset */
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+               UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+               ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+       fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
 
        /* set the write pointer delay */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
        /* set the wb address */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
        /* programm the RB_BASE for ring buffer */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
-       WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                upper_32_bits(ring->gpu_addr));
 
        /* Initialize the ring buffer's read and write pointers */
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
 
-       WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
 
-       ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
-       WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+       ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));
 
+       fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+       /* Unstall DPG */
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+               0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
        return 0;
 }
 
@@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                }
 
                /* disable register anti-hang mechanism */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
                /* set uvd status busy */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
-               WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+               WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
        }
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* enable VCPU clock */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
 
                /* disable master interrupt */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
                /* setup mmUVD_LMI_CTRL */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
                tmp &= ~0xff;
-               WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+               WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
                        UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                        UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
 
                /* setup mmUVD_MPC_CNTL */
-               tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+               tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
                tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
                tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
                WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
 
                /* setup UVD_MPC_SET_MUXA0 */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
                        ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
 
                /* setup UVD_MPC_SET_MUXB0 */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
                        ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
 
                /* setup mmUVD_MPC_SET_MUX */
-               WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+               WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
                        ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                        (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
        vcn_v2_5_mc_resume(adev);
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* VCN global tiling registers */
-               WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+               WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);
-               WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+               WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);
 
                /* enable LMI MC and UMC channels */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
                /* unblock VCPU register access */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
 
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                for (k = 0; k < 10; ++k) {
                        uint32_t status;
 
                        for (j = 0; j < 100; ++j) {
-                               status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+                               status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
                                if (status & 2)
                                        break;
                                if (amdgpu_emu_mode == 1)
@@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                                break;
 
                        DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
-                       WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+                       WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                                UVD_VCPU_CNTL__BLK_RST_MASK,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);
                        mdelay(10);
-                       WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+                       WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                        mdelay(10);
@@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                }
 
                /* enable master interrupt */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
                        UVD_MASTINT_EN__VCPU_EN_MASK,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
                /* clear the busy bit of VCN_STATUS */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
                        ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
 
                ring = &adev->vcn.inst[i].ring_dec;
                /* force RBC into idle state */
@@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
 
+               fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                /* programm the RB_BASE for ring buffer */
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+               WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));
 
                /* Initialize the ring buffer's read and write pointers */
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
 
-               ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
-               WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+               ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+               WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
                                lower_32_bits(ring->wptr));
-               ring = &adev->vcn.inst[i].ring_enc[0];
-               WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+               fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+               ring = &adev->vcn.inst[i].ring_enc[0];
+               WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                ring = &adev->vcn.inst[i].ring_enc[1];
-               WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-               WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-               WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+               WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+               WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+               WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
        }
 
        return 0;
@@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
         * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
         *  memory descriptor location
         */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 
        /* 2, update vmid of descriptor */
-       data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+       data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
        data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
        /* use domain0 for MM scheduler */
        data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
 
        /* 3, notify mmsch about the size of this descriptor */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
 
        /* 4, set resp to zero */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
 
        /*
         * 5, kick off the initialization and wait until
         * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
         */
-       WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+       WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
 
-       data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+       data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
        loop = 10;
        while ((data & 0x10000002) != 0x10000002) {
                udelay(100);
-               data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+               data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
                loop--;
                if (!loop)
                        break;
@@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
        uint32_t table_size = 0;
        struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
        struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
-       struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
        struct mmsch_v1_0_cmd_end end = { { 0 } };
        uint32_t *init_table = adev->virt.mm_table.cpu_addr;
        struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
 
        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
        direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
-       direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
        end.cmd_header.command_type = MMSCH_COMMAND__END;
 
        header->version = MMSCH_VERSION;
@@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
                table_size = 0;
 
                MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
                size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
                /* mc resume*/
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
                        offset = 0;
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+                               SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
                } else {
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                lower_32_bits(adev->vcn.inst[i].gpu_addr));
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i,
+                               SOC15_REG_OFFSET(VCN, i,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                upper_32_bits(adev->vcn.inst[i].gpu_addr));
                        offset = size;
                        MMSCH_V1_0_INSERT_DIRECT_WT(
-                               SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+                               SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
                                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
                }
 
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
                        size);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
                        0);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
                        AMDGPU_VCN_STACK_SIZE);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
                        0);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
                        AMDGPU_VCN_CONTEXT_SIZE);
 
                ring = &adev->vcn.inst[i].ring_enc[0];
                ring->wptr = 0;
 
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
                        lower_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
                        upper_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
                        ring->ring_size / 4);
 
                ring = &adev->vcn.inst[i].ring_dec;
                ring->wptr = 0;
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
                        lower_32_bits(ring->gpu_addr));
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i,
+                       SOC15_REG_OFFSET(VCN, i,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
                        upper_32_bits(ring->gpu_addr));
 
@@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
                MMSCH_V1_0_INSERT_DIRECT_WT(
-                       SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+                       SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
 
                /* add end packet */
                memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
        uint32_t tmp;
 
        /* Wait for power status to be 1 */
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
        /* wait for read ptr to be equal to write ptr */
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
 
-       tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
-       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
        /* disable dynamic power gating mode */
-       WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
        return 0;
@@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
                        return r;
 
                /* block VCPU register access */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
                        UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
 
                /* reset VCPU */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__BLK_RST_MASK,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
                /* disable VCPU clock */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
                        ~(UVD_VCPU_CNTL__CLK_EN_MASK));
 
                /* clear status */
@@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
                vcn_v2_5_enable_clock_gating(adev);
 
                /* enable register anti-hang mechanism */
-               WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+               WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
                        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        }
@@ -1365,55 +1402,69 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 {
        struct amdgpu_ring *ring;
        uint32_t reg_data = 0;
-       int ret_code;
+       int ret_code = 0;
 
        /* pause/unpause if state is changed */
        if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
                DRM_DEBUG("dpg pause state changed %d -> %d",
                        adev->vcn.inst[inst_idx].pause_state.fw_based,  new_state->fw_based);
-               reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+               reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
                        (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 
                if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-                       ret_code = 0;
-                       SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+                       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
                                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
                        if (!ret_code) {
+                               volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
                                /* pause DPG */
                                reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
 
                                /* wait for ACK */
-                               SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+                               SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
+                               /* Stall DPG before WPTR/RPTR reset */
+                               WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+                                          UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+                                          ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
                                /* Restore */
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst[inst_idx].ring_enc[0];
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
+                               ring->wptr = 0;
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst[inst_idx].ring_enc[1];
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
-                               WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
-                                          RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-
-                               SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+                               ring->wptr = 0;
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+                               WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                               fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+                               /* Unstall DPG */
+                               WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+                                          0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+                               SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
                                           UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
                        }
                } else {
-                       /* unpause dpg, no need to wait */
                        reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-                       WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                       WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+                       SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+                               UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
                }
                adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
        }
@@ -1432,7 +1483,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+       return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
 }
 
 /**
@@ -1449,7 +1500,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
        if (ring->use_doorbell)
                return adev->wb.wb[ring->wptr_offs];
        else
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
 }
 
 /**
@@ -1463,15 +1514,11 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-               WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
-                       lower_32_bits(ring->wptr) | 0x80000000);
-
        if (ring->use_doorbell) {
                adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
-               WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
        }
 }
 
@@ -1517,9 +1564,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
 
        if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
        else
-               return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+               return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
 }
 
 /**
@@ -1537,12 +1584,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
-                       return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+                       return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
        } else {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
-                       return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+                       return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
        }
 }
 
@@ -1562,14 +1609,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
-                       WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                }
        } else {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
-                       WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                }
        }
 }
index 3ce10e05d0d6b8f4efe32eb8f255785e7cd22ccf..af8986a553544d97312ab8d1d3848b9a3bdfc961 100644 (file)
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       uint32_t reg = 0;
-
-       if (adev->asic_type == CHIP_TONGA ||
-           adev->asic_type == CHIP_FIJI) {
-              reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-              /* bit0: 0 means pf and 1 means vf */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-              /* bit31: 0 means disable IOV and 1 means enable */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-       }
-
-       if (reg == 0) {
-               if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS},
        {mmGRBM_STATUS2},
@@ -1728,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-       /* in early init stage, vbios code won't work */
-       vi_detect_hw_virtualization(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_vi_virt_ops;
 
index 19ddd2312e00d4c8f0178d4fe6130d8b80fb09b8..7a01e6133798014e9a5e290fa9d3d8003af21069 100644 (file)
 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
-#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_ACQUIRE_MEM                             0x58
 #define        PACKET3_REWIND                                  0x59
 #define        PACKET3_LOAD_UCONFIG_REG                        0x5E
 #define        PACKET3_LOAD_SH_REG                             0x5F
index 0ec5f25adf56b134461685e7cf8516caa681690f..cf0017f4d9d5ba99febc4ab1d109882643a286b6 100644 (file)
@@ -215,6 +215,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        }
 
        q_properties->is_interop = false;
+       q_properties->is_gws = false;
        q_properties->queue_percent = args->queue_percentage;
        q_properties->priority = args->queue_priority;
        q_properties->queue_address = args->ring_base_address;
@@ -1322,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                goto err_free;
        }
 
+       /* Update the VRAM usage count */
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+               WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+
        mutex_unlock(&p->mutex);
 
        args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1337,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        return 0;
 
 err_free:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
        return err;
@@ -1351,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        void *mem;
        struct kfd_dev *dev;
        int ret;
+       uint64_t size = 0;
 
        dev = kfd_device_by_id(GET_GPU_ID(args->handle));
        if (!dev)
@@ -1373,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        }
 
        ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
-                                               (struct kgd_mem *)mem);
+                                               (struct kgd_mem *)mem, &size);
 
        /* If freeing the buffer failed, leave the handle in place for
         * clean-up during process tear-down.
@@ -1382,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
                kfd_process_device_remove_obj_handle(
                        pdd, GET_IDR_HANDLE(args->handle));
 
+       WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+
 err_unlock:
        mutex_unlock(&p->mutex);
        return ret;
@@ -1584,6 +1592,45 @@ copy_from_user_failed:
        return err;
 }
 
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+               struct kfd_process *p, void *data)
+{
+       int retval;
+       struct kfd_ioctl_alloc_queue_gws_args *args = data;
+       struct queue *q;
+       struct kfd_dev *dev;
+
+       mutex_lock(&p->mutex);
+       q = pqm_get_user_queue(&p->pqm, args->queue_id);
+
+       if (q) {
+               dev = q->device;
+       } else {
+               retval = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (!dev->gws) {
+               retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+               retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+       mutex_unlock(&p->mutex);
+
+       args->first_gws = 0;
+       return retval;
+
+out_unlock:
+       mutex_unlock(&p->mutex);
+       return retval;
+}
+
 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
                struct kfd_process *p, void *data)
 {
@@ -1687,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
        return 0;
 
 err_free:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
        return r;
@@ -1786,6 +1833,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
                                kfd_ioctl_import_dmabuf, 0),
 
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+                       kfd_ioctl_alloc_queue_gws, 0),
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
index de9f68d5c31278ea80e3456716cebd789bd0d54a..1009a3b8dcc2cc7e0ad06ce8a8713b0519d5a4bc 100644 (file)
@@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
        num_nodes = crat_table->num_domains;
        image_len = crat_table->length;
 
-       pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+       pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
 
        for (node_id = 0; node_id < num_nodes; node_id++) {
                top_dev = kfd_create_topology_device(device_list);
index 05bc6d96ec5275b00a5b1eb9a69c506eee43ce44..0491ab2b4a9b3bb7746c45693d329669157b4b38 100644 (file)
@@ -569,6 +569,23 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
        }
 }
 
+static int kfd_gws_init(struct kfd_dev *kfd)
+{
+       int ret = 0;
+
+       if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+               return 0;
+
+       if (hws_gws_support
+               || (kfd->device_info->asic_family >= CHIP_VEGA10
+                       && kfd->device_info->asic_family <= CHIP_RAVEN
+                       && kfd->mec2_fw_version >= 0x1b3))
+               ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
+                               amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+
+       return ret;
+}
+
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         struct drm_device *ddev,
                         const struct kgd2kfd_shared_resources *gpu_resources)
@@ -578,6 +595,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->ddev = ddev;
        kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
                        KGD_ENGINE_MEC1);
+       kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+                       KGD_ENGINE_MEC2);
        kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
                        KGD_ENGINE_SDMA1);
        kfd->shared_resources = *gpu_resources;
@@ -598,13 +617,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        } else
                kfd->max_proc_per_quantum = hws_max_conc_proc;
 
-       /* Allocate global GWS that is shared by all KFD processes */
-       if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
-                       amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
-               dev_err(kfd_device, "Could not allocate %d gws\n",
-                       amdgpu_amdkfd_get_num_gws(kfd->kgd));
-               goto out;
-       }
        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
                        kfd->device_info->mqd_size_aligned;
@@ -662,6 +674,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto device_queue_manager_error;
        }
 
+       /* If supported on this device, allocate global GWS that is shared
+        * by all KFD processes
+        */
+       if (kfd_gws_init(kfd)) {
+               dev_err(kfd_device, "Could not allocate %d gws\n",
+                       amdgpu_amdkfd_get_num_gws(kfd->kgd));
+               goto gws_error;
+       }
+
        if (kfd_iommu_device_init(kfd)) {
                dev_err(kfd_device, "Error initializing iommuv2\n");
                goto device_iommu_error;
@@ -691,6 +712,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 kfd_topology_add_device_error:
 kfd_resume_error:
 device_iommu_error:
+gws_error:
        device_queue_manager_uninit(kfd->dqm);
 device_queue_manager_error:
        kfd_interrupt_exit(kfd);
@@ -701,7 +723,7 @@ kfd_doorbell_error:
 kfd_gtt_sa_init_error:
        amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 alloc_gtt_mem_failure:
-       if (hws_gws_support)
+       if (kfd->gws)
                amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
@@ -720,7 +742,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
-               if (hws_gws_support)
+               if (kfd->gws)
                        amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
        }
 
index 77ea0f0cb163b93d2819368214592b50e7466fe0..e9c4867abeffba32f3034d06e41a57a5709227dd 100644 (file)
@@ -505,8 +505,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                deallocate_vmid(dqm, qpd, q);
        }
        qpd->queue_count--;
-       if (q->properties.is_active)
+       if (q->properties.is_active) {
                decrement_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
+       }
 
        return retval;
 }
@@ -583,6 +588,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        else if (!q->properties.is_active && prev_active)
                decrement_queue_count(dqm, q->properties.type);
 
+       if (q->gws && !q->properties.is_gws) {
+               if (q->properties.is_active) {
+                       dqm->gws_queue_count++;
+                       pdd->qpd.mapped_gws_queue = true;
+               }
+               q->properties.is_gws = true;
+       } else if (!q->gws && q->properties.is_gws) {
+               if (q->properties.is_active) {
+                       dqm->gws_queue_count--;
+                       pdd->qpd.mapped_gws_queue = false;
+               }
+               q->properties.is_gws = false;
+       }
+
        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
@@ -631,6 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                                q->properties.type)];
                q->properties.is_active = false;
                decrement_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
 
                if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
                        continue;
@@ -744,6 +767,10 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                q->properties.type)];
                q->properties.is_active = true;
                increment_queue_count(dqm, q->properties.type);
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count++;
+                       qpd->mapped_gws_queue = true;
+               }
 
                if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
                        continue;
@@ -913,6 +940,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->active_cp_queue_count = 0;
+       dqm->gws_queue_count = 0;
 
        for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
                int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1061,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
                        break;
                }
 
-               res.queue_mask |= (1ull << i);
+               res.queue_mask |= 1ull
+                       << amdgpu_queue_mask_bit_to_set_resource_bit(
+                               (struct amdgpu_device *)dqm->dev->kgd, i);
        }
        res.gws_mask = ~0ull;
        res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1082,7 +1112,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->processes_count = 0;
        dqm->active_cp_queue_count = 0;
-
+       dqm->gws_queue_count = 0;
        dqm->active_runlist = false;
        dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
        dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1432,6 +1462,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval == -ETIME)
                        qpd->reset_wavefronts = true;
+               if (q->properties.is_gws) {
+                       dqm->gws_queue_count--;
+                       qpd->mapped_gws_queue = false;
+               }
        }
 
        /*
@@ -1650,8 +1684,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                        deallocate_sdma_queue(dqm, q);
 
-               if (q->properties.is_active)
+               if (q->properties.is_active) {
                        decrement_queue_count(dqm, q->properties.type);
+                       if (q->properties.is_gws) {
+                               dqm->gws_queue_count--;
+                               qpd->mapped_gws_queue = false;
+                       }
+               }
 
                dqm->total_queue_count--;
        }
index 50d919f814e9add4e77c4747aef9bf16677e1694..4afa015c69b11cf5c9bf4ce114a33359745aba28 100644 (file)
@@ -182,6 +182,7 @@ struct device_queue_manager {
        unsigned int            processes_count;
        unsigned int            active_queue_count;
        unsigned int            active_cp_queue_count;
+       unsigned int            gws_queue_count;
        unsigned int            total_queue_count;
        unsigned int            next_pipe_to_allocate;
        unsigned int            *allocated_queues;
index e05d75ecda21839e85a9c7990fed87df84503dd9..fce6ccabe38b49c875e9d5080ef97ffb154eaa18 100644 (file)
@@ -37,7 +37,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
        vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
        if (vmid < dev->vm_info.first_vmid_kfd ||
            vmid > dev->vm_info.last_vmid_kfd)
-               return 0;
+               return false;
 
        source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
        client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
@@ -69,7 +69,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 
        /* If there is no valid PASID, it's likely a bug */
        if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
-               return 0;
+               return false;
 
        /* Interrupt types we care about: various signals and faults.
         * They will be forwarded to a work queue (see below).
index 8d871514671eb737e498b2dcfb76d80ec08961ed..7c8786b9eb0aaad65571d876e138d45aad8a7f13 100644 (file)
@@ -192,7 +192,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
 
        dev_warn_ratelimited(kfd_device,
                        "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
-                       PCI_BUS_NUM(pdev->devfn),
+                       pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn),
                        pasid,
index bae706462f962790be6ef3d463605c9f08a01ee6..a2b77d1df8540b516e2335b06544b07f9f70fda0 100644 (file)
@@ -126,6 +126,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 
        prop.queue_size = queue_size;
        prop.is_interop = false;
+       prop.is_gws = false;
        prop.priority = 1;
        prop.queue_percent = 100;
        prop.type = type;
index efdb75e7677b393a3173c5803a6047c78bb5b9d5..685ca82d42fe8a81a231dc8902c318103d80c7ed 100644 (file)
@@ -41,7 +41,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                                unsigned int *rlib_size,
                                bool *over_subscription)
 {
-       unsigned int process_count, queue_count, compute_queue_count;
+       unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
        unsigned int map_queue_size;
        unsigned int max_proc_per_quantum = 1;
        struct kfd_dev *dev = pm->dqm->dev;
@@ -49,6 +49,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
        process_count = pm->dqm->processes_count;
        queue_count = pm->dqm->active_queue_count;
        compute_queue_count = pm->dqm->active_cp_queue_count;
+       gws_queue_count = pm->dqm->gws_queue_count;
 
        /* check if there is over subscription
         * Note: the arbitration between the number of VMIDs and
@@ -61,7 +62,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
                max_proc_per_quantum = dev->max_proc_per_quantum;
 
        if ((process_count > max_proc_per_quantum) ||
-           compute_queue_count > get_cp_queues_num(pm->dqm)) {
+           compute_queue_count > get_cp_queues_num(pm->dqm) ||
+           gws_queue_count > 1) {
                *over_subscription = true;
                pr_debug("Over subscribed runlist\n");
        }
index 2de01009f1b6d6efc623b1800c8e60d260179589..bdca9dc5f1181bdeebd8467b980e17f5d18ec583 100644 (file)
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
        packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
-       packet->bitfields14.num_gws = qpd->num_gws;
+       packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
        packet->bitfields14.num_oac = qpd->num_oac;
        packet->bitfields14.sdma_enable = 1;
        packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
index c24cad3c64ed250ee280f68bae38152637b78fee..f0587d94294d7e71ff759932ad5108c833065bf2 100644 (file)
@@ -282,6 +282,7 @@ struct kfd_dev {
 
        /* Firmware versions */
        uint16_t mec_fw_version;
+       uint16_t mec2_fw_version;
        uint16_t sdma_fw_version;
 
        /* Maximum process number mapped to HW scheduler */
@@ -410,6 +411,10 @@ enum KFD_QUEUE_PRIORITY {
  * @is_active: Defines if the queue is active or not. @is_active and
  * @is_evicted are protected by the DQM lock.
  *
+ * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
+ * @is_gws should be protected by the DQM lock, since changing it can yield the
+ * possibility of updating DQM state on number of GWS queues.
+ *
  * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
  * of the queue.
  *
@@ -432,6 +437,7 @@ struct queue_properties {
        bool is_interop;
        bool is_evicted;
        bool is_active;
+       bool is_gws;
        /* Not relevant for user mode queues in cp scheduling */
        unsigned int vmid;
        /* Relevant only for sdma queues*/
@@ -563,6 +569,14 @@ struct qcm_process_device {
         */
        bool reset_wavefronts;
 
+       /* This flag tells us if this process has a GWS-capable
+        * queue that will be mapped into the runlist. It's
+        * possible to request a GWS BO, but not have the queue
+        * currently mapped, and this changes how the MAP_PROCESS
+        * PM4 packet is configured.
+        */
+       bool mapped_gws_queue;
+
        /*
         * All the memory management data should be here too
         */
@@ -615,6 +629,8 @@ enum kfd_pdd_bound {
        PDD_BOUND_SUSPENDED,
 };
 
+#define MAX_VRAM_FILENAME_LEN 11
+
 /* Data that is per-process-per device. */
 struct kfd_process_device {
        /*
@@ -657,6 +673,11 @@ struct kfd_process_device {
 
        /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
        enum kfd_pdd_bound bound;
+
+       /* VRAM usage */
+       uint64_t vram_usage;
+       struct attribute attr_vram;
+       char vram_filename[MAX_VRAM_FILENAME_LEN];
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -923,6 +944,8 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
                        void *gws);
 struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
                                                unsigned int qid);
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+                                               unsigned int qid);
 int pqm_get_wave_state(struct process_queue_manager *pqm,
                       unsigned int qid,
                       void __user *ctl_stack,
index fe0cd49d4ea7ce1708dfc00e2388a4bd18dd8c9b..d27221ddcdeb3d920021431ad37a542a3a85ecea 100644 (file)
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
 {
-       int val = 0;
-
        if (strcmp(attr->name, "pasid") == 0) {
                struct kfd_process *p = container_of(attr, struct kfd_process,
                                                     attr_pasid);
-               val = p->pasid;
+
+               return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+       } else if (strncmp(attr->name, "vram_", 5) == 0) {
+               struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+                                                             attr_vram);
+               if (pdd)
+                       return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
        } else {
                pr_err("Invalid attribute");
                return -EINVAL;
        }
 
-       return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+       return 0;
 }
 
 static void kfd_procfs_kobj_release(struct kobject *kobj)
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
        return 0;
 }
 
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+       int ret = 0;
+       struct kfd_process_device *pdd;
+
+       if (!p)
+               return -EINVAL;
+
+       if (!p->kobj)
+               return -EFAULT;
+
+       /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+                        pdd->dev->id);
+               pdd->attr_vram.name = pdd->vram_filename;
+               pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&pdd->attr_vram);
+               ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+               if (ret)
+                       pr_warn("Creating vram usage for gpu id %d failed",
+                               (int)pdd->dev->id);
+       }
+
+       return ret;
+}
+
+
 void kfd_procfs_del_queue(struct queue *q)
 {
        if (!q)
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
        struct kfd_dev *dev = pdd->dev;
 
        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
 }
 
 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
        return err;
 
 err_map_mem:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
 err_alloc_mem:
        *kptr = NULL;
        return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
                                                        process->kobj);
                if (!process->kobj_queues)
                        pr_warn("Creating KFD proc/queues folder failed");
+
+               ret = kfd_procfs_add_vram_usage(process);
+               if (ret)
+                       pr_warn("Creating vram usage file for pid %d failed",
+                               (int)process->lead_thread->pid);
        }
 out:
        if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }
 
-               amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+               amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
 }
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
+       struct kfd_process_device *pdd;
 
        /* Remove the procfs files */
        if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
                kobject_del(p->kobj_queues);
                kobject_put(p->kobj_queues);
                p->kobj_queues = NULL;
+
+               list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+                       sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
                kobject_del(p->kobj);
                kobject_put(p->kobj);
                p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
+       pdd->qpd.mapped_gws_queue = false;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        pdd->runtime_inuse = false;
+       pdd->vram_usage = 0;
        list_add(&pdd->per_device_list, &p->per_device_data);
 
        /* Init idr used for memory handle translation */
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
        return p;
 }
 
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
  *
  * Eviction is reference-counted per process-device. This means multiple
  * evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
        return r;
 }
 
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
 int kfd_process_restore_queues(struct kfd_process *p)
 {
        struct kfd_process_device *pdd;
index 084c35f55d591f025d95053effe2d61b2117bf46..eb1635ac89887c18534e1ce1e969469386fcbf45 100644 (file)
@@ -476,6 +476,15 @@ struct kernel_queue *pqm_get_kernel_queue(
        return NULL;
 }
 
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+                                       unsigned int qid)
+{
+       struct process_queue_node *pqn;
+
+       pqn = get_queue_by_qid(pqm, qid);
+       return pqn ? pqn->q : NULL;
+}
+
 int pqm_get_wave_state(struct process_queue_manager *pqm,
                       unsigned int qid,
                       void __user *ctl_stack,
index aa0bfa78a66741be2fbb0dfcb8b598d608119498..bb77f7af2b6d9e3f37ce33d56614952021dbd01c 100644 (file)
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.device_id);
        sysfs_show_32bit_prop(buffer, "location_id",
                        dev->node_props.location_id);
+       sysfs_show_32bit_prop(buffer, "domain",
+                       dev->node_props.domain);
        sysfs_show_32bit_prop(buffer, "drm_render_minor",
                        dev->node_props.drm_render_minor);
        sysfs_show_64bit_prop(buffer, "hive_id",
@@ -787,7 +789,6 @@ static int kfd_topology_update_sysfs(void)
 {
        int ret;
 
-       pr_info("Creating topology SYSFS entries\n");
        if (!sys_props.kobj_topology) {
                sys_props.kobj_topology =
                                kfd_alloc_struct(sys_props.kobj_topology);
@@ -1048,7 +1049,6 @@ int kfd_topology_init(void)
                sys_props.generation_count++;
                kfd_update_system_properties();
                kfd_debug_print_topology();
-               pr_info("Finished initializing topology\n");
        } else
                pr_err("Failed to update topology in sysfs ret=%d\n", ret);
 
@@ -1303,7 +1303,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
        dev->node_props.vendor_id = gpu->pdev->vendor;
        dev->node_props.device_id = gpu->pdev->device;
+       dev->node_props.capability |=
+               ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
+                       HSA_CAP_ASIC_REVISION_SHIFT) &
+                       HSA_CAP_ASIC_REVISION_MASK);
        dev->node_props.location_id = pci_dev_id(gpu->pdev);
+       dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
        dev->node_props.max_engine_clk_fcompute =
                amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
        dev->node_props.max_engine_clk_ccompute =
@@ -1317,7 +1322,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
                                gpu->device_info->num_xgmi_sdma_engines;
        dev->node_props.num_sdma_queues_per_engine =
                                gpu->device_info->num_sdma_queues_per_engine;
-       dev->node_props.num_gws = (hws_gws_support &&
+       dev->node_props.num_gws = (dev->gpu->gws &&
                dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
                amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
        dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
index 46eeecaf1b68ee67506524c196a7c8c747b3e2aa..326d9b26b7aa7fbbab553671ee1a0d348739d8bf 100644 (file)
@@ -41,7 +41,6 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT   8
 #define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK   0x00003000
 #define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT  12
-#define HSA_CAP_RESERVED                       0xffffc000
 
 #define HSA_CAP_DOORBELL_TYPE_PRE_1_0          0x0
 #define HSA_CAP_DOORBELL_TYPE_1_0              0x1
 #define HSA_CAP_SRAM_EDCSUPPORTED              0x00080000
 #define HSA_CAP_MEM_EDCSUPPORTED               0x00100000
 #define HSA_CAP_RASEVENTNOTIFY                 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK             0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT            22
+
+#define HSA_CAP_RESERVED                       0xfc078000
 
 struct kfd_node_properties {
        uint64_t hive_id;
@@ -77,6 +80,7 @@ struct kfd_node_properties {
        uint32_t vendor_id;
        uint32_t device_id;
        uint32_t location_id;
+       uint32_t domain;
        uint32_t max_engine_clk_fcompute;
        uint32_t max_engine_clk_ccompute;
        int32_t  drm_render_minor;
index 87858bc57e640b33c7ab83021685a718ac06fe96..1911a34cc0602c88a21582e0ed507263279b8444 100644 (file)
@@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP
        bool "Enable HDCP support in DC"
        depends on DRM_AMD_DC
        help
-        Choose this option
-        if you want to support
-        HDCP authentication
+         Choose this option if you want to support HDCP authentication.
 
 config DEBUG_KERNEL_DC
        bool "Enable kgdb break in DC"
        depends on DRM_AMD_DC
        help
-         Choose this option
-         if you want to hit
-         kdgb_break in assert.
+         Choose this option if you want to hit kdgb_break in assert.
 
 endmenu
index 7fc15b82fe48afad1d276e416b1d8271d3953631..bdba0bfd6df1beebf875efc2c1d680edac599763 100644 (file)
@@ -30,7 +30,7 @@
 #include "dc.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
 #include "dc/inc/hw/dmcu.h"
 #include "dc/inc/hw/abm.h"
 #include "dc/dc_dmub_srv.h"
@@ -774,8 +774,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                                fw_inst_const_size);
        }
 
-       memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
-              fw_bss_data_size);
+       if (fw_bss_data_size)
+               memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+                      fw_bss_data, fw_bss_data_size);
 
        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@ -917,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
+       if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+               adev->dm.dc->debug.force_single_disp_pipe_split = false;
+               adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+       }
+
+       if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+               adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
+       if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+               adev->dm.dc->debug.disable_stutter = true;
+
+       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+               adev->dm.dc->debug.disable_dsc = true;
+
+       if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+               adev->dm.dc->debug.disable_clock_gate = true;
+
        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1214,6 +1232,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);
+       region_params.fw_inst_const =
+               adev->dm.dmub_fw->data +
+               le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+               PSP_HEADER_BYTES;
 
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
@@ -1333,9 +1355,14 @@ static int dm_late_init(void *handle)
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
-       struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+       struct dmcu *dmcu = NULL;
        bool ret = false;
 
+       if (!adev->dm.fw_dmcu)
+               return detect_mst_link_for_all_connectors(adev->ddev);
+
+       dmcu = adev->dm.dc->res_pool->dmcu;
+
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;
 
@@ -1511,12 +1538,115 @@ static int dm_hw_fini(void *handle)
        return 0;
 }
 
+
+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+                                struct dc_state *state, bool enable)
+{
+       enum dc_irq_source irq_source;
+       struct amdgpu_crtc *acrtc;
+       int rc = -EBUSY;
+       int i = 0;
+
+       for (i = 0; i < state->stream_count; i++) {
+               acrtc = get_crtc_by_otg_inst(
+                               adev, state->stream_status[i].primary_otg_inst);
+
+               if (acrtc && state->stream_status[i].plane_count != 0) {
+                       irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+                       rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+                       DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+                                 acrtc->crtc_id, enable ? "en" : "dis", rc);
+                       if (rc)
+                               DRM_WARN("Failed to %s pflip interrupts\n",
+                                        enable ? "enable" : "disable");
+
+                       if (enable) {
+                               rc = dm_enable_vblank(&acrtc->base);
+                               if (rc)
+                                       DRM_WARN("Failed to enable vblank interrupts\n");
+                       } else {
+                               dm_disable_vblank(&acrtc->base);
+                       }
+
+               }
+       }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+       struct dc_state *context = NULL;
+       enum dc_status res = DC_ERROR_UNEXPECTED;
+       int i;
+       struct dc_stream_state *del_streams[MAX_PIPES];
+       int del_streams_count = 0;
+
+       memset(del_streams, 0, sizeof(del_streams));
+
+       context = dc_create_state(dc);
+       if (context == NULL)
+               goto context_alloc_fail;
+
+       dc_resource_state_copy_construct_current(dc, context);
+
+       /* First remove from context all streams */
+       for (i = 0; i < context->stream_count; i++) {
+               struct dc_stream_state *stream = context->streams[i];
+
+               del_streams[del_streams_count++] = stream;
+       }
+
+       /* Remove all planes for removed streams and then remove the streams */
+       for (i = 0; i < del_streams_count; i++) {
+               if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+                       res = DC_FAIL_DETACH_SURFACES;
+                       goto fail;
+               }
+
+               res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+               if (res != DC_OK)
+                       goto fail;
+       }
+
+
+       res = dc_validate_global_state(dc, context, false);
+
+       if (res != DC_OK) {
+               DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
+               goto fail;
+       }
+
+       res = dc_commit_state(dc, context);
+
+fail:
+       dc_release_state(context);
+
+context_alloc_fail:
+       return res;
+}
+
 static int dm_suspend(void *handle)
 {
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;
 
+       if (adev->in_gpu_reset) {
+               mutex_lock(&dm->dc_lock);
+               dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+               dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+               amdgpu_dm_commit_zero_streams(dm->dc);
+
+               amdgpu_dm_irq_suspend(adev);
+
+               return ret;
+       }
+
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
 
@@ -1527,7 +1657,7 @@ static int dm_suspend(void *handle)
 
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
-       return ret;
+       return 0;
 }
 
 static struct amdgpu_dm_connector *
@@ -1631,6 +1761,46 @@ static void emulated_link_detect(struct dc_link *link)
 
 }
 
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+                                    struct amdgpu_display_manager *dm)
+{
+       struct {
+               struct dc_surface_update surface_updates[MAX_SURFACES];
+               struct dc_plane_info plane_infos[MAX_SURFACES];
+               struct dc_scaling_info scaling_infos[MAX_SURFACES];
+               struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+               struct dc_stream_update stream_update;
+       } * bundle;
+       int k, m;
+
+       bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+       if (!bundle) {
+               dm_error("Failed to allocate update bundle\n");
+               goto cleanup;
+       }
+
+       for (k = 0; k < dc_state->stream_count; k++) {
+               bundle->stream_update.stream = dc_state->streams[k];
+
+               for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+                       bundle->surface_updates[m].surface =
+                               dc_state->stream_status->plane_states[m];
+                       bundle->surface_updates[m].surface->force_full_update =
+                               true;
+               }
+               dc_commit_updates_for_stream(
+                       dm->dc, bundle->surface_updates,
+                       dc_state->stream_status->plane_count,
+                       dc_state->streams[k], &bundle->stream_update, dc_state);
+       }
+
+cleanup:
+       kfree(bundle);
+
+       return;
+}
+
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -1647,8 +1817,44 @@ static int dm_resume(void *handle)
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
-       int i, r;
+       struct dc_state *dc_state;
+       int i, r, j;
+
+       if (adev->in_gpu_reset) {
+               dc_state = dm->cached_dc_state;
+
+               r = dm_dmub_hw_init(adev);
+               if (r)
+                       DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+               dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+               dc_resume(dm->dc);
+
+               amdgpu_dm_irq_resume_early(adev);
+
+               for (i = 0; i < dc_state->stream_count; i++) {
+                       dc_state->streams[i]->mode_changed = true;
+                       for (j = 0; j < dc_state->stream_status->plane_count; j++) {
+                               dc_state->stream_status->plane_states[j]->update_flags.raw
+                                       = 0xffffffff;
+                       }
+               }
+
+               WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
+               dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+               dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+               dc_release_state(dm->cached_dc_state);
+               dm->cached_dc_state = NULL;
+
+               amdgpu_dm_irq_resume_late(adev);
+
+               mutex_unlock(&dm->dc_lock);
+
+               return 0;
+       }
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
@@ -3013,9 +3219,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                goto fail;
        }
 
-       if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
-               dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-
        /* No userspace support. */
        dm->dc->debug.disable_tri_buf = true;
 
@@ -3286,7 +3489,7 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 }
 
 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
-                      uint64_t *tiling_flags)
+                      uint64_t *tiling_flags, bool *tmz_surface)
 {
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
        int r = amdgpu_bo_reserve(rbo, false);
@@ -3301,6 +3504,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
        if (tiling_flags)
                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
 
+       if (tmz_surface)
+               *tmz_surface = amdgpu_bo_encrypted(rbo);
+
        amdgpu_bo_unreserve(rbo);
 
        return r;
@@ -3388,6 +3594,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address,
+                            bool tmz_surface,
                             bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = &afb->base;
@@ -3398,6 +3605,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));
 
+       address->tmz_surface = tmz_surface;
+
        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
@@ -3588,6 +3797,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
+                           bool tmz_surface,
                            bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = plane_state->fb;
@@ -3631,6 +3841,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        case DRM_FORMAT_P010:
                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
                break;
+       case DRM_FORMAT_XRGB16161616F:
+       case DRM_FORMAT_ARGB16161616F:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+               break;
+       case DRM_FORMAT_XBGR16161616F:
+       case DRM_FORMAT_ABGR16161616F:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+               break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %s\n",
@@ -3670,7 +3888,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address,
+                                          &plane_info->dcc, address, tmz_surface,
                                           force_disable_dcc);
        if (ret)
                return ret;
@@ -3694,6 +3912,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool tmz_surface = false;
        bool force_disable_dcc = false;
 
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
@@ -3705,7 +3924,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->clip_rect = scaling_info.clip_rect;
        dc_plane_state->scaling_quality = scaling_info.scaling_quality;
 
-       ret = get_fb_info(amdgpu_fb, &tiling_flags);
+       ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
        if (ret)
                return ret;
 
@@ -3713,6 +3932,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
                                          &dc_plane_state->address,
+                                         tmz_surface,
                                          force_disable_dcc);
        if (ret)
                return ret;
@@ -3800,8 +4020,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
-                                     const struct drm_connector_state *state,
-                                     bool is_y420)
+                                     bool is_y420, int requested_bpc)
 {
        uint8_t bpc;
 
@@ -3821,10 +4040,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
                bpc = bpc ? bpc : 8;
        }
 
-       if (!state)
-               state = connector->state;
-
-       if (state) {
+       if (requested_bpc > 0) {
                /*
                 * Cap display bpc based on the user requested value.
                 *
@@ -3833,7 +4049,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
                 * or if this was called outside of atomic check, so it
                 * can't be used directly.
                 */
-               bpc = min(bpc, state->max_requested_bpc);
+               bpc = min_t(u8, bpc, requested_bpc);
 
                /* Round down to the nearest even number. */
                bpc = bpc - (bpc & 1);
@@ -3955,7 +4171,8 @@ static void fill_stream_properties_from_drm_display_mode(
        const struct drm_display_mode *mode_in,
        const struct drm_connector *connector,
        const struct drm_connector_state *connector_state,
-       const struct dc_stream_state *old_stream)
+       const struct dc_stream_state *old_stream,
+       int requested_bpc)
 {
        struct dc_crtc_timing *timing_out = &stream->timing;
        const struct drm_display_info *info = &connector->display_info;
@@ -3985,8 +4202,9 @@ static void fill_stream_properties_from_drm_display_mode(
 
        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
-               connector, connector_state,
-               (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
+               connector,
+               (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+               requested_bpc);
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;
 
@@ -4192,7 +4410,8 @@ static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                       const struct dm_connector_state *dm_state,
-                      const struct dc_stream_state *old_stream)
+                      const struct dc_stream_state *old_stream,
+                      int requested_bpc)
 {
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
@@ -4277,10 +4496,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(stream,
-                       &mode, &aconnector->base, con_state, NULL);
+                       &mode, &aconnector->base, con_state, NULL, requested_bpc);
        else
                fill_stream_properties_from_drm_display_mode(stream,
-                       &mode, &aconnector->base, con_state, old_stream);
+                       &mode, &aconnector->base, con_state, old_stream, requested_bpc);
 
        stream->timing.flags.DSC = 0;
 
@@ -4317,14 +4536,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
-       if (stream->link->psr_feature_enabled)  {
+       if (stream->link->psr_settings.psr_feature_enabled)     {
                struct dc  *core_dc = stream->link->ctx->dc;
 
                if (dc_is_dmcu_initialized(core_dc)) {
-                       struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
-                       stream->psr_version = dmcu->dmcu_version.psr_version;
-
                        //
                        // should decide stream support vsc sdp colorimetry capability
                        // before building vsc info packet
@@ -4803,16 +5018,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
        create_eml_sink(aconnector);
 }
 
+static struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+                               const struct drm_display_mode *drm_mode,
+                               const struct dm_connector_state *dm_state,
+                               const struct dc_stream_state *old_stream)
+{
+       struct drm_connector *connector = &aconnector->base;
+       struct amdgpu_device *adev = connector->dev->dev_private;
+       struct dc_stream_state *stream;
+       int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+       enum dc_status dc_result = DC_OK;
+
+       do {
+               stream = create_stream_for_sink(aconnector, drm_mode,
+                                               dm_state, old_stream,
+                                               requested_bpc);
+               if (stream == NULL) {
+                       DRM_ERROR("Failed to create stream for sink!\n");
+                       break;
+               }
+
+               dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+               if (dc_result != DC_OK) {
+                       DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+                                     drm_mode->hdisplay,
+                                     drm_mode->vdisplay,
+                                     drm_mode->clock,
+                                     dc_result);
+
+                       dc_stream_release(stream);
+                       stream = NULL;
+                       requested_bpc -= 2; /* lower bpc to retry validation */
+               }
+
+       } while (stream == NULL && requested_bpc >= 6);
+
+       return stream;
+}
+
 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
 {
        int result = MODE_ERROR;
        struct dc_sink *dc_sink;
-       struct amdgpu_device *adev = connector->dev->dev_private;
        /* TODO: Unhardcode stream count */
        struct dc_stream_state *stream;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       enum dc_status dc_result = DC_OK;
 
        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -4833,24 +5086,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
                goto fail;
        }
 
-       stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
-       if (stream == NULL) {
-               DRM_ERROR("Failed to create stream for sink!\n");
-               goto fail;
-       }
-
-       dc_result = dc_validate_stream(adev->dm.dc, stream);
-
-       if (dc_result == DC_OK)
+       stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+       if (stream) {
+               dc_stream_release(stream);
                result = MODE_OK;
-       else
-               DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
-                             mode->hdisplay,
-                             mode->vdisplay,
-                             mode->clock,
-                             dc_result);
-
-       dc_stream_release(stream);
+       }
 
 fail:
        /* TODO: error handling*/
@@ -5173,10 +5413,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                return 0;
 
        if (!state->duplicated) {
+               int max_bpc = conn_state->max_requested_bpc;
                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                                aconnector->force_yuv420_output;
-               color_depth = convert_color_depth_from_display_info(connector, conn_state,
-                                                                   is_y420);
+               color_depth = convert_color_depth_from_display_info(connector,
+                                                                   is_y420,
+                                                                   max_bpc);
                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
                clock = adjusted_mode->clock;
                dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
@@ -5331,6 +5573,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool tmz_surface = false;
        bool force_disable_dcc = false;
 
        dm_plane_state_old = to_dm_plane_state(plane->state);
@@ -5380,6 +5623,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 
        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
 
+       tmz_surface = amdgpu_bo_encrypted(rbo);
+
        ttm_eu_backoff_reservation(&ticket, &list);
 
        afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5395,7 +5640,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address,
+                       &plane_state->address, tmz_surface,
                        force_disable_dcc);
        }
 
@@ -5542,6 +5787,12 @@ static int get_plane_formats(const struct drm_plane *plane,
                        formats[num_formats++] = DRM_FORMAT_NV12;
                if (plane_cap && plane_cap->pixel_format_support.p010)
                        formats[num_formats++] = DRM_FORMAT_P010;
+               if (plane_cap && plane_cap->pixel_format_support.fp16) {
+                       formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
+                       formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+                       formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
+                       formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
+               }
                break;
 
        case DRM_PLANE_TYPE_OVERLAY:
@@ -6569,6 +6820,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        unsigned long flags;
        struct amdgpu_bo *abo;
        uint64_t tiling_flags;
+       bool tmz_surface = false;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
@@ -6621,6 +6873,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+                       bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
                }
 
                fill_dc_scaling_info(new_plane_state,
@@ -6663,12 +6916,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
                amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 
+               tmz_surface = amdgpu_bo_encrypted(abo);
+
                amdgpu_bo_unreserve(abo);
 
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
                        &bundle->flip_addrs[planes_count].address,
+                       tmz_surface,
                        false);
 
                DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
@@ -6814,7 +7070,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_allow_active)
+                               acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);
 
                dc_commit_updates_for_stream(dm->dc,
@@ -6825,12 +7081,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                                     dc_state);
 
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->psr_version &&
-                                               !acrtc_state->stream->link->psr_feature_enabled)
+                               acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+                               !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->link->psr_feature_enabled &&
-                                               !acrtc_state->stream->link->psr_allow_active) {
+                               acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+                               !acrtc_state->stream->link->psr_settings.psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }
 
@@ -7144,7 +7400,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream) {
-                               if (dm_old_crtc_state->stream->link->psr_allow_active)
+                               if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
                                        amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
 
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -7592,10 +7848,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;
 
-               new_stream = create_stream_for_sink(aconnector,
-                                                    &new_crtc_state->mode,
-                                                   dm_new_conn_state,
-                                                   dm_old_crtc_state->stream);
+               new_stream = create_validate_stream_for_sink(aconnector,
+                                                            &new_crtc_state->mode,
+                                                            dm_new_conn_state,
+                                                            dm_old_crtc_state->stream);
 
                /*
                 * we can have no stream on ACTION_SET if a display
@@ -8056,6 +8312,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                        struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
                        struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
                        uint64_t tiling_flags;
+                       bool tmz_surface = false;
 
                        new_plane_crtc = new_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -8085,6 +8342,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                                                new_dm_plane_state->dc_state->gamma_correction;
                                bundle->surface_updates[num_plane].in_transfer_func =
                                                new_dm_plane_state->dc_state->in_transfer_func;
+                               bundle->surface_updates[num_plane].gamut_remap_matrix =
+                                               &new_dm_plane_state->dc_state->gamut_remap_matrix;
                                bundle->stream_update.gamut_remap =
                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
                                bundle->stream_update.output_csc_transform =
@@ -8101,14 +8360,14 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                        bundle->surface_updates[num_plane].scaling_info = scaling_info;
 
                        if (amdgpu_fb) {
-                               ret = get_fb_info(amdgpu_fb, &tiling_flags);
+                               ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
                                if (ret)
                                        goto cleanup;
 
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address,
+                                       &flip_addr->address, tmz_surface,
                                        false);
                                if (ret)
                                        goto cleanup;
@@ -8609,8 +8868,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
                return;
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                        dpcd_data, sizeof(dpcd_data))) {
-               link->psr_feature_enabled = dpcd_data[0] ? true:false;
-               DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+               link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+               if (dpcd_data[0] == 0) {
+                       link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+                       link->psr_settings.psr_feature_enabled = false;
+               } else {
+                       link->psr_settings.psr_version = DC_PSR_VERSION_1;
+                       link->psr_settings.psr_feature_enabled = true;
+               }
+
+               DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
        }
 }
 
@@ -8625,16 +8893,14 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
        struct dc_link *link = NULL;
        struct psr_config psr_config = {0};
        struct psr_context psr_context = {0};
-       struct dc *dc = NULL;
        bool ret = false;
 
        if (stream == NULL)
                return false;
 
        link = stream->link;
-       dc = link->ctx->dc;
 
-       psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+       psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
 
        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
@@ -8646,7 +8912,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
 
        }
-       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
+       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
 
        return ret;
 }
index 5cab3e65d9925e6baf89cdadc19bae3c892e356d..d61186ff411d8535708db1abc6f747256e9c16d0 100644 (file)
@@ -315,6 +315,7 @@ struct amdgpu_display_manager {
 #endif
 
        struct drm_atomic_state *cached_state;
+       struct dc_state *cached_dc_state;
 
        struct dm_comressor_info compressor;
 
index 2233d293a707a9e79d9f6d4da4433eb2ad217a94..4dfb6b55bb2ede7f9408363d30b9dc4015a2ea56 100644 (file)
@@ -239,7 +239,8 @@ static int __set_output_tf(struct dc_transfer_func *func,
                 * instead to simulate this.
                 */
                gamma->type = GAMMA_CUSTOM;
-               res = mod_color_calculate_degamma_params(func, gamma, true);
+               res = mod_color_calculate_degamma_params(NULL, func,
+                                                       gamma, true);
        } else {
                /*
                 * Assume sRGB. The actual mapping will depend on whether the
@@ -271,7 +272,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
 
        __drm_lut_to_dc_gamma(lut, gamma, false);
 
-       res = mod_color_calculate_degamma_params(func, gamma, true);
+       res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
        dc_gamma_release(&gamma);
 
        return res ? 0 : -ENOMEM;
@@ -419,9 +420,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                                      struct dc_plane_state *dc_plane_state)
 {
        const struct drm_color_lut *degamma_lut;
+       enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
        uint32_t degamma_size;
        int r;
 
+       /* Get the correct base transfer function for implicit degamma. */
+       switch (dc_plane_state->format) {
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+               /* DC doesn't have a transfer function for BT601 specifically. */
+               tf = TRANSFER_FUNCTION_BT709;
+               break;
+       default:
+               break;
+       }
+
        if (crtc->cm_has_degamma) {
                degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
                                                 &degamma_size);
@@ -455,8 +468,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                 * map these to the atomic one instead.
                 */
                if (crtc->cm_is_degamma_srgb)
-                       dc_plane_state->in_transfer_func->tf =
-                               TRANSFER_FUNCTION_SRGB;
+                       dc_plane_state->in_transfer_func->tf = tf;
                else
                        dc_plane_state->in_transfer_func->tf =
                                TRANSFER_FUNCTION_LINEAR;
@@ -471,7 +483,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                 * in linear space. Assume that the input is sRGB.
                 */
                dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
-               dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+               dc_plane_state->in_transfer_func->tf = tf;
+
+               if (tf != TRANSFER_FUNCTION_SRGB &&
+                   !mod_color_calculate_degamma_params(NULL,
+                           dc_plane_state->in_transfer_func, NULL, false))
+                       return -ENOMEM;
        } else {
                /* ...Otherwise we can just bypass the DGM block. */
                dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
index 0461fecd68db336d0e16f1f5ab55bac24f4caf8d..076af267b4882c876d57f68ee8212858344af635 100644 (file)
@@ -32,7 +32,7 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_debugfs.h"
 #include "dm_helpers.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
 
 struct dmub_debugfs_trace_header {
        uint32_t entry_count;
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
        return 0;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * NOTE: Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since it's rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ *             or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       bool hdcp_cap, hdcp2_cap;
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
+
+       hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+       hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+
+       if (hdcp_cap)
+               seq_printf(m, "%s ", "HDCP1.4");
+       if (hdcp2_cap)
+               seq_printf(m, "%s ", "HDCP2.2");
+
+       if (!hdcp_cap && !hdcp2_cap)
+               seq_printf(m, "%s ", "None");
+
+       seq_puts(m, "\n");
+
+       return 0;
+}
+#endif
 /* function description
  *
  * generic SDP message access for testing
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
 DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
 
 static const struct file_operations dp_link_settings_debugfs_fops = {
        .owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
                {"test_pattern", &dp_phy_test_pattern_fops},
                {"output_bpc", &output_bpc_fops},
                {"vrr_range", &vrr_range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+               {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+#endif
                {"sdp_message", &sdp_message_fops},
                {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
                {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
                {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
 };
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+       char *name;
+       const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+               {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+};
+#endif
 /*
  * Force YUV420 output if available from the given mode
  */
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
        connector->debugfs_dpcd_address = 0;
        connector->debugfs_dpcd_size = 0;
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+               for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+                       debugfs_create_file(hdmi_debugfs_entries[i].name,
+                                           0644, dir, connector,
+                                           hdmi_debugfs_entries[i].fops);
+               }
+       }
+#endif
 }
 
 /*
@@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct dc *dc = adev->dm.dc;
-       unsigned int backlight = dc_get_current_backlight_pwm(dc);
+       struct amdgpu_display_manager *dm = &adev->dm;
+
+       unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
 
        seq_printf(m, "0x%x\n", backlight);
        return 0;
@@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct dc *dc = adev->dm.dc;
-       unsigned int backlight = dc_get_target_backlight_pwm(dc);
+       struct amdgpu_display_manager *dm = &adev->dm;
+
+       unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
 
        seq_printf(m, "0x%x\n", backlight);
        return 0;
index c20fb08c450be4489f020a1030d20c9fa3791cae..b086d5c906e0f21d9d190cde994088f11a2e97a2 100644 (file)
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
        struct amdgpu_dm_connector *aconnector = link->priv;
 
        if (!aconnector) {
-               DRM_ERROR("Failed to find connector for link!");
+               DC_LOG_DC("Failed to find connector for link!\n");
                return false;
        }
 
@@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
                struct dc_sink *sink)
 {
        struct amdgpu_dm_connector *aconnector = link->priv;
+       struct drm_connector *connector = &aconnector->base;
        struct i2c_adapter *ddc;
        int retry = 3;
        enum dc_edid_status edid_status;
@@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
 
                edid = drm_get_edid(&aconnector->base, ddc);
 
+               /* DP Compliance Test 4.2.2.6 */
+               if (link->aux_mode && connector->edid_corrupt)
+                       drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
+
+               if (!edid && connector->edid_corrupt) {
+                       connector->edid_corrupt = false;
+                       return EDID_BAD_CHECKSUM;
+               }
+
                if (!edid)
                        return EDID_NO_RESPONSE;
 
@@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
                DRM_ERROR("EDID err: %d, on connector: %s",
                                edid_status,
                                aconnector->base.name);
-       if (link->aux_mode) {
-               union test_request test_request = { {0} };
-               union test_response test_response = { {0} };
-
-               dm_helpers_dp_read_dpcd(ctx,
-                                       link,
-                                       DP_TEST_REQUEST,
-                                       &test_request.raw,
-                                       sizeof(union test_request));
-
-               if (!test_request.bits.EDID_READ)
-                       return edid_status;
 
-               test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
-               dm_helpers_dp_write_dpcd(ctx,
-                                       link,
-                                       DP_TEST_EDID_CHECKSUM,
-                                       &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
-                                       1);
-
-               dm_helpers_dp_write_dpcd(ctx,
-                                       link,
-                                       DP_TEST_RESPONSE,
-                                       &test_response.raw,
-                                       sizeof(test_response));
-
-       }
+       /* DP Compliance Test 4.2.2.3 */
+       if (link->aux_mode)
+               drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
 
        return edid_status;
 }
index d2917759b7ab1b70abdea458e5c8d97c16e0a344..ae0a7ef1d595a6e226754a007e6c1f163cad254b 100644 (file)
 #include "amdgpu_dm_debugfs.h"
 #endif
 
-
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dc/dcn20/dcn20_resource.h"
 #endif
 
-/* #define TRACE_DPCD */
-
-#ifdef TRACE_DPCD
-#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
-static inline char *side_band_msg_type_to_str(uint32_t address)
-{
-       static char str[10] = {0};
-
-       if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
-               strcpy(str, "DOWN_REQ");
-       else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
-               strcpy(str, "UP_REP");
-       else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
-               strcpy(str, "DOWN_REP");
-       else
-               strcpy(str, "UP_REQ");
-
-       return str;
-}
-
-static void log_dpcd(uint8_t type,
-                    uint32_t address,
-                    uint8_t *data,
-                    uint32_t size,
-                    bool res)
-{
-       DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
-                       (type == DP_AUX_NATIVE_READ) ||
-                       (type == DP_AUX_I2C_READ) ?
-                                       "Read" : "Write",
-                       address,
-                       SIDE_BAND_MSG(address) ?
-                                       side_band_msg_type_to_str(address) : "Nop",
-                       res ? "OK" : "Fail");
-
-       if (res) {
-               print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
-       }
-}
-#endif
-
 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
 {
@@ -136,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 static void
 dm_dp_mst_connector_destroy(struct drm_connector *connector)
 {
-       struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
-       struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+       struct amdgpu_dm_connector *aconnector =
+               to_amdgpu_dm_connector(connector);
+       struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
 
-       kfree(amdgpu_dm_connector->edid);
-       amdgpu_dm_connector->edid = NULL;
+       if (aconnector->dc_sink) {
+               dc_link_remove_remote_sink(aconnector->dc_link,
+                                          aconnector->dc_sink);
+               dc_sink_release(aconnector->dc_sink);
+       }
+
+       kfree(aconnector->edid);
 
        drm_encoder_cleanup(&amdgpu_encoder->base);
        kfree(amdgpu_encoder);
        drm_connector_cleanup(connector);
-       drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
-       kfree(amdgpu_dm_connector);
+       drm_dp_mst_put_port_malloc(aconnector->port);
+       kfree(aconnector);
 }
 
 static int
@@ -435,40 +398,13 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
-       DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
-                aconnector, connector->base.id, aconnector->mst_port);
-
        drm_dp_mst_get_port_malloc(port);
 
-       DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
        return connector;
 }
 
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_connector *connector)
-{
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
-       DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
-                aconnector, connector->base.id, aconnector->mst_port);
-
-       if (aconnector->dc_sink) {
-               amdgpu_dm_update_freesync_caps(connector, NULL);
-               dc_link_remove_remote_sink(aconnector->dc_link,
-                                          aconnector->dc_sink);
-               dc_sink_release(aconnector->dc_sink);
-               aconnector->dc_sink = NULL;
-               aconnector->dc_link->cur_link_settings.lane_count = 0;
-       }
-
-       drm_connector_unregister(connector);
-       drm_connector_put(connector);
-}
-
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
-       .destroy_connector = dm_dp_destroy_mst_connector,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
index 7ad0cad0f4efedd9fcec850d18b13c596872f21e..01b99e0d788e38b0121a4e44db6887c79d0c90f9 100644 (file)
@@ -24,8 +24,7 @@
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-BASICS = conversion.o fixpt31_32.o \
-       log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
 
 AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
 
index 8edc2506d49e7b1838287a69875ab62429e6774b..bed91572f82a6a3bd44ab338c8ad76c2d294147e 100644 (file)
@@ -113,13 +113,19 @@ static void encoder_control_dmcub(
                struct dc_dmub_srv *dmcub,
                struct dig_encoder_stream_setup_parameters_v1_5 *dig)
 {
-       struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+       union dmub_rb_cmd cmd;
 
-       encoder_control.header.type = DMUB_CMD__VBIOS;
-       encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
-       encoder_control.encoder_control.dig.stream_param = *dig;
+       memset(&cmd, 0, sizeof(cmd));
 
-       dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+       cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
+       cmd.digx_encoder_control.header.sub_type =
+               DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+       cmd.digx_encoder_control.header.payload_bytes =
+               sizeof(cmd.digx_encoder_control) -
+               sizeof(cmd.digx_encoder_control.header);
+       cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -238,14 +244,19 @@ static void transmitter_control_dmcub(
                struct dc_dmub_srv *dmcub,
                struct dig_transmitter_control_parameters_v1_6 *dig)
 {
-       struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
 
-       transmitter_control.header.type = DMUB_CMD__VBIOS;
-       transmitter_control.header.sub_type =
+       cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
+       cmd.dig1_transmitter_control.header.sub_type =
                DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
-       transmitter_control.transmitter_control.dig = *dig;
+       cmd.dig1_transmitter_control.header.payload_bytes =
+               sizeof(cmd.dig1_transmitter_control) -
+               sizeof(cmd.dig1_transmitter_control.header);
+       cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
 
-       dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -339,13 +350,18 @@ static void set_pixel_clock_dmcub(
                struct dc_dmub_srv *dmcub,
                struct set_pixel_clock_parameter_v1_7 *clk)
 {
-       struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+       union dmub_rb_cmd cmd;
 
-       pixel_clock.header.type = DMUB_CMD__VBIOS;
-       pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
-       pixel_clock.pixel_clock.clk = *clk;
+       memset(&cmd, 0, sizeof(cmd));
 
-       dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+       cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS;
+       cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+       cmd.set_pixel_clock.header.payload_bytes =
+               sizeof(cmd.set_pixel_clock) -
+               sizeof(cmd.set_pixel_clock.header);
+       cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
@@ -705,13 +721,19 @@ static void enable_disp_power_gating_dmcub(
        struct dc_dmub_srv *dmcub,
        struct enable_disp_power_gating_parameters_v2_1 *pwr)
 {
-       struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
 
-       power_gating.header.type = DMUB_CMD__VBIOS;
-       power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
-       power_gating.power_gating.pwr = *pwr;
+       cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS;
+       cmd.enable_disp_power_gating.header.sub_type =
+               DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+       cmd.enable_disp_power_gating.header.payload_bytes =
+               sizeof(cmd.enable_disp_power_gating) -
+               sizeof(cmd.enable_disp_power_gating.header);
+       cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
 
-       dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        dc_dmub_srv_cmd_execute(dmcub);
        dc_dmub_srv_wait_idle(dmcub);
 }
index 3960a8db94cbe9a937c72d08bfac8ee92ec6f003..1e5a92b192a1528261df979cc3e1fa210670f202 100644 (file)
@@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
                struct dc_debug_options *dbg,
                struct dc_state *context)
 {
+       int i;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               /**
+                * Workaround for avoiding pipe-split in cases where we'd split
+                * planes that are too small, resulting in splits that aren't
+                * valid for the scaler.
+                */
+               if (pipe->plane_state &&
+                   (pipe->plane_state->dst_rect.width <= 16 ||
+                    pipe->plane_state->dst_rect.height <= 16 ||
+                    pipe->plane_state->src_rect.width <= 16 ||
+                    pipe->plane_state->src_rect.height <= 16)) {
+                       hack_disable_optional_pipe_split(v);
+                       return;
+               }
+       }
+
        if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
                hack_disable_optional_pipe_split(v);
 
@@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
                hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
 }
 
-
 unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
 {
        /* for low power RV2 variants, the highest voltage level we want is 0 */
index 8ec2dfe45d4009877756d7d07047cbd267cd59fb..a5c2114e4292f375a0192515d138ef879e294d5d 100644 (file)
@@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
                dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
 
        if (edp_link) {
-               clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+               clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
                dc_link_set_psr_allow_active(edp_link, false, false);
        }
 
index 26db1c5d4e4d2619dea1ba09c4d8ad2f92678cd9..b210f8e9d592d1168685725b9947e1777ada7958 100644 (file)
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        int dprefclk_wdivider;
        int dprefclk_src_sel;
-       int dp_ref_clk_khz = 600000;
+       int dp_ref_clk_khz;
        int target_div;
 
        /* ASSERT DP Reference Clock source is from DFS*/
index 97b7f32294fd85943b916505e76f5f8adb4bb0f2..c320b7af7d34ca77683006401c274ac9c1dbd9b9 100644 (file)
@@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
                        VBIOSSMC_MSG_SetDispclkFreq,
                        requested_dispclk_khz / 1000);
 
-       /* Actual dispclk set is returned in the parameter register */
-       actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
-
        if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
                if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
                        if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
index 47431ca6986db7443ec7397678621e8985bdbcbb..45cfb7c45566aff4e017a166252503a1e1089a91 100644 (file)
@@ -66,6 +66,8 @@
 
 #include "dce/dce_i2c.h"
 
+#include "dmub/dmub_srv.h"
+
 #define CTX \
        dc->ctx
 
@@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
 
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-               if (pipe->stream == stream)
+               if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }
        /* Stream not found */
@@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;
 
+       param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+       param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
@@ -2204,7 +2209,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
                                if (should_program_abm) {
                                        if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
-                                               pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+                                               dc->hwss.set_abm_immediate_disable(pipe_ctx);
                                        } else {
                                                pipe_ctx->stream_res.abm->funcs->set_abm_level(
                                                        pipe_ctx->stream_res.abm, stream->abm_level);
@@ -2640,33 +2645,12 @@ void dc_set_power_state(
 
 void dc_resume(struct dc *dc)
 {
-
        uint32_t i;
 
        for (i = 0; i < dc->link_count; i++)
                core_link_resume(dc->links[i]);
 }
 
-unsigned int dc_get_current_backlight_pwm(struct dc *dc)
-{
-       struct abm *abm = dc->res_pool->abm;
-
-       if (abm)
-               return abm->funcs->get_current_backlight(abm);
-
-       return 0;
-}
-
-unsigned int dc_get_target_backlight_pwm(struct dc *dc)
-{
-       struct abm *abm = dc->res_pool->abm;
-
-       if (abm)
-               return abm->funcs->get_target_backlight(abm);
-
-       return 0;
-}
-
 bool dc_is_dmcu_initialized(struct dc *dc)
 {
        struct dmcu *dmcu = dc->res_pool->dmcu;
index 67cfff1586e9fe6e91cd880e4d6a5942cf2ed8c1..48ab51533d5d6affd96ed25b8dec379af671be78 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 
 #include "dm_services.h"
-#include "atom.h"
+#include "atomfirmware.h"
 #include "dm_helpers.h"
 #include "dc.h"
 #include "grph_object_id.h"
 #include "dmcu.h"
 #include "hw/clk_mgr.h"
 #include "dce/dmub_psr.h"
+#include "dmub/dmub_srv.h"
+#include "inc/hw/panel_cntl.h"
 
 #define DC_LOGGER_INIT(logger)
 
-
 #define LINK_INFO(...) \
        DC_LOG_HW_HOTPLUG(  \
                __VA_ARGS__)
 enum {
        PEAK_FACTOR_X1000 = 1006,
        /*
-       * Some receivers fail to train on first try and are good
-       * on subsequent tries. 2 retries should be plenty. If we
-       * don't have a successful training then we don't expect to
-       * ever get one.
-       */
+        * Some receivers fail to train on first try and are good
+        * on subsequent tries. 2 retries should be plenty. If we
+        * don't have a successful training then we don't expect to
+        * ever get one.
+        */
        LINK_TRAINING_MAX_VERIFY_RETRY = 2
 };
 
@@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link)
 {
        int i;
 
-       if (link->hpd_gpio != NULL) {
+       if (link->hpd_gpio) {
                dal_gpio_destroy_irq(&link->hpd_gpio);
                link->hpd_gpio = NULL;
        }
@@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link)
        if (link->ddc)
                dal_ddc_service_destroy(&link->ddc);
 
-       if(link->link_enc)
+       if (link->panel_cntl)
+               link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+       if (link->link_enc)
                link->link_enc->funcs->destroy(&link->link_enc);
 
        if (link->local_sink)
@@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link)
 }
 
 struct gpio *get_hpd_gpio(struct dc_bios *dcb,
-               struct graphics_object_id link_id,
-               struct gpio_service *gpio_service)
+                         struct graphics_object_id link_id,
+                         struct gpio_service *gpio_service)
 {
        enum bp_result bp_result;
        struct graphics_object_hpd_info hpd_info;
@@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
                return NULL;
        }
 
-       return dal_gpio_service_create_irq(
-               gpio_service,
-               pin_info.offset,
-               pin_info.mask);
+       return dal_gpio_service_create_irq(gpio_service,
+                                          pin_info.offset,
+                                          pin_info.mask);
 }
 
 /*
@@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
  *  @return
  *     true on success, false otherwise
  */
-static bool program_hpd_filter(
-       const struct dc_link *link)
+static bool program_hpd_filter(const struct dc_link *link)
 {
        bool result = false;
-
        struct gpio *hpd;
-
        int delay_on_connect_in_ms = 0;
        int delay_on_disconnect_in_ms = 0;
 
@@ -159,10 +159,10 @@ static bool program_hpd_filter(
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                /* Program hpd filter to allow DP signal to settle */
                /* 500: not able to detect MST <-> SST switch as HPD is low for
-                *      only 100ms on DELL U2413
-                * 0:   some passive dongle still show aux mode instead of i2c
-                * 20-50:not enough to hide bouncing HPD with passive dongle.
-                *      also see intermittent i2c read issues.
+                * only 100ms on DELL U2413
+                * 0: some passive dongle still show aux mode instead of i2c
+                * 20-50: not enough to hide bouncing HPD with passive dongle.
+                * also see intermittent i2c read issues.
                 */
                delay_on_connect_in_ms = 80;
                delay_on_disconnect_in_ms = 0;
@@ -175,7 +175,8 @@ static bool program_hpd_filter(
        }
 
        /* Obtain HPD handle */
-       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                          link->ctx->gpio_service);
 
        if (!hpd)
                return result;
@@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
        }
 
        /* todo: may need to lock gpio access */
-       hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-       if (hpd_pin == NULL)
+       hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                              link->ctx->gpio_service);
+       if (!hpd_pin)
                goto hpd_gpio_failure;
 
        dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
@@ -248,8 +250,7 @@ hpd_gpio_failure:
        return false;
 }
 
-static enum ddc_transaction_type get_ddc_transaction_type(
-               enum signal_type sink_signal)
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
 {
        enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
 
@@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                /* MST does not use I2COverAux, but there is the
                 * SPECIAL use case for "immediate dwnstrm device
-                * access" (EPR#370830). */
+                * access" (EPR#370830).
+                */
                transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                break;
 
@@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
        return transaction_type;
 }
 
-static enum signal_type get_basic_signal_type(
-       struct graphics_object_id encoder,
-       struct graphics_object_id downstream)
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+                                             struct graphics_object_id downstream)
 {
        if (downstream.type == OBJECT_TYPE_CONNECTOR) {
                switch (downstream.id) {
@@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
        /* Open GPIO and set it to I2C mode */
        /* Note: this GpioMode_Input will be converted
         * to GpioConfigType_I2cAuxDualMode in GPIO component,
-        * which indicates we need additional delay */
+        * which indicates we need additional delay
+        */
 
-       if (GPIO_RESULT_OK != dal_ddc_open(
-               ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+       if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+                        GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
                dal_ddc_close(ddc);
 
                return present;
@@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
  * @brief
  * Detect output sink type
  */
-static enum signal_type link_detect_sink(
-       struct dc_link *link,
-       enum dc_detect_reason reason)
+static enum signal_type link_detect_sink(struct dc_link *link,
+                                        enum dc_detect_reason reason)
 {
-       enum signal_type result = get_basic_signal_type(
-               link->link_enc->id, link->link_id);
+       enum signal_type result = get_basic_signal_type(link->link_enc->id,
+                                                       link->link_id);
 
        /* Internal digital encoder will detect only dongles
-        * that require digital signal */
+        * that require digital signal
+        */
 
        /* Detection mechanism is different
         * for different native connectors.
         * LVDS connector supports only LVDS signal;
         * PCIE is a bus slot, the actual connector needs to be detected first;
         * eDP connector supports only eDP signal;
-        * HDMI should check straps for audio */
+        * HDMI should check straps for audio
+        */
 
        /* PCIE detects the actual connector on add-on board */
-
        if (link->link_id.id == CONNECTOR_ID_PCIE) {
                /* ZAZTODO implement PCIE add-on card detection */
        }
@@ -432,8 +434,10 @@ static enum signal_type link_detect_sink(
        switch (link->link_id.id) {
        case CONNECTOR_ID_HDMI_TYPE_A: {
                /* check audio support:
-                * if native HDMI is not supported, switch to DVI */
-               struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+                * if native HDMI is not supported, switch to DVI
+                */
+               struct audio_support *aud_support =
+                                       &link->dc->res_pool->audio_support;
 
                if (!aud_support->hdmi_audio_native)
                        if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
@@ -461,16 +465,15 @@ static enum signal_type link_detect_sink(
        return result;
 }
 
-static enum signal_type decide_signal_from_strap_and_dongle_type(
-               enum display_dongle_type dongle_type,
-               struct audio_support *audio_support)
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+                                                                struct audio_support *audio_support)
 {
        enum signal_type signal = SIGNAL_TYPE_NONE;
 
        switch (dongle_type) {
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                if (audio_support->hdmi_audio_on_dongle)
-                       signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+                       signal = SIGNAL_TYPE_HDMI_TYPE_A;
                else
                        signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
@@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(
        return signal;
 }
 
-static enum signal_type dp_passive_dongle_detection(
-               struct ddc_service *ddc,
-               struct display_sink_capability *sink_cap,
-               struct audio_support *audio_support)
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+                                                   struct display_sink_capability *sink_cap,
+                                                   struct audio_support *audio_support)
 {
-       dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
-                                               ddc, sink_cap);
-       return decide_signal_from_strap_and_dongle_type(
-                       sink_cap->dongle_type,
-                       audio_support);
+       dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+       return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+                                                       audio_support);
 }
 
 static void link_disconnect_sink(struct dc_link *link)
@@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
        link->local_sink = prev_sink;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+       bool ret = false;
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_DISPLAY_PORT:
+       case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+               break;
+       case SIGNAL_TYPE_DVI_SINGLE_LINK:
+       case SIGNAL_TYPE_DVI_DUAL_LINK:
+       case SIGNAL_TYPE_HDMI_TYPE_A:
+       /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable,
+        * we can poll for bksv but some displays have an issue with this. Since its so rare
+        * for a display to not be 1.4 capable, this assumtion is ok
+        */
+               ret = true;
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link)
+{
+       bool ret = false;
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_DISPLAY_PORT:
+       case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+                               link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+                               (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+               break;
+       case SIGNAL_TYPE_DVI_SINGLE_LINK:
+       case SIGNAL_TYPE_DVI_DUAL_LINK:
+       case SIGNAL_TYPE_HDMI_TYPE_A:
+               ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+       struct hdcp_protection_message msg22;
+       struct hdcp_protection_message msg14;
+
+       memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+       memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+       memset(link->hdcp_caps.rx_caps.raw, 0,
+               sizeof(link->hdcp_caps.rx_caps.raw));
+
+       if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                       link->ddc->transaction_type ==
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+                       link->connector_signal == SIGNAL_TYPE_EDP) {
+               msg22.data = link->hdcp_caps.rx_caps.raw;
+               msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+               msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+       } else {
+               msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+               msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+               msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+       }
+       msg22.version = HDCP_VERSION_22;
+       msg22.link = HDCP_LINK_PRIMARY;
+       msg22.max_retries = 5;
+       dc_process_hdcp_msg(signal, link, &msg22);
+
+       if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+               enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+               msg14.data = &link->hdcp_caps.bcaps.raw;
+               msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+               msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+               msg14.version = HDCP_VERSION_14;
+               msg14.link = HDCP_LINK_PRIMARY;
+               msg14.max_retries = 5;
+
+               status = dc_process_hdcp_msg(signal, link, &msg14);
+       }
+
+}
+#endif
 
 static void read_current_link_settings_on_detect(struct dc_link *link)
 {
@@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
 
        // Read DPCD 00101h to find out the number of lanes currently set
        for (i = 0; i < read_dpcd_retry_cnt; i++) {
-               status = core_link_read_dpcd(
-                               link,
-                               DP_LANE_COUNT_SET,
-                               &lane_count_set.raw,
-                               sizeof(lane_count_set));
+               status = core_link_read_dpcd(link,
+                                            DP_LANE_COUNT_SET,
+                                            &lane_count_set.raw,
+                                            sizeof(lane_count_set));
                /* First DPCD read after VDD ON can fail if the particular board
                 * does not have HPD pin wired correctly. So if DPCD read fails,
                 * which it should never happen, retry a few times. Target worst
                 * case scenario of 80 ms.
                 */
                if (status == DC_OK) {
-                       link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+                       link->cur_link_settings.lane_count =
+                                       lane_count_set.bits.LANE_COUNT_SET;
                        break;
                }
 
@@ -552,7 +643,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
 
        // Read DPCD 00100h to find if standard link rates are set
        core_link_read_dpcd(link, DP_LINK_BW_SET,
-                       &link_bw_set, sizeof(link_bw_set));
+                           &link_bw_set, sizeof(link_bw_set));
 
        if (link_bw_set == 0) {
                if (link->connector_signal == SIGNAL_TYPE_EDP) {
@@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
                         * Read DPCD 00115h to find the edp link rate set used
                         */
                        core_link_read_dpcd(link, DP_LINK_RATE_SET,
-                                       &link_rate_set, sizeof(link_rate_set));
+                                           &link_rate_set, sizeof(link_rate_set));
 
                        // edp_supported_link_rates_count = 0 for DP
                        if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
                                link->cur_link_settings.link_rate =
-                                               link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+                                       link->dpcd_caps.edp_supported_link_rates[link_rate_set];
                                link->cur_link_settings.link_rate_set = link_rate_set;
                                link->cur_link_settings.use_link_rate_set = true;
                        }
@@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
        }
        // Read DPCD 00003h to find the max down spread.
        core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
-                       &max_down_spread.raw, sizeof(max_down_spread));
+                           &max_down_spread.raw, sizeof(max_down_spread));
        link->cur_link_settings.link_spread =
                max_down_spread.bits.MAX_DOWN_SPREAD ?
                LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
@@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link,
                        dal_ddc_service_set_transaction_type(link->ddc,
                                                             sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       /* In case of fallback to SST when topology discovery below fails
+                        * HDCP caps will be querried again later by the upper layer (caller
+                        * of this function). */
+                       query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
+#endif
                        /*
                         * This call will initiate MST topology discovery. Which
                         * will detect MST ports and add new DRM connector DRM
@@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
        if (new_edid->length == 0)
                return false;
 
-       return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+       return (memcmp(old_edid->raw_edid,
+                      new_edid->raw_edid, new_edid->length) == 0);
 }
 
-static bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
 {
-
        /**
         * something is terribly wrong if time out is > 200ms. (5Hz)
         * 500 microseconds * 400 tries us 200 ms
@@ -703,7 +800,7 @@ static bool wait_for_alt_mode(struct dc_link *link)
 
        DC_LOGGER_INIT(link->ctx->logger);
 
-       if (link->link_enc->funcs->is_in_alt_mode == NULL)
+       if (!link->link_enc->funcs->is_in_alt_mode)
                return true;
 
        is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
@@ -718,21 +815,21 @@ static bool wait_for_alt_mode(struct dc_link *link)
                udelay(sleep_time_in_microseconds);
                /* ask the link if alt mode is enabled, if so return ok */
                if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
-
                        finish_timestamp = dm_get_timestamp(link->ctx);
-                       time_taken_in_ns = dm_get_elapse_time_in_ns(
-                               link->ctx, finish_timestamp, enter_timestamp);
+                       time_taken_in_ns =
+                               dm_get_elapse_time_in_ns(link->ctx,
+                                                        finish_timestamp,
+                                                        enter_timestamp);
                        DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
                                       div_u64(time_taken_in_ns, 1000000));
                        return true;
                }
-
        }
        finish_timestamp = dm_get_timestamp(link->ctx);
        time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
                                                    enter_timestamp);
        DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
-                       div_u64(time_taken_in_ns, 1000000));
+                      div_u64(time_taken_in_ns, 1000000));
        return false;
 }
 
@@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link,
                return false;
 
        if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
-                       link->connector_signal == SIGNAL_TYPE_EDP) &&
-                       link->local_sink) {
-
+            link->connector_signal == SIGNAL_TYPE_EDP) &&
+           link->local_sink) {
                // need to re-write OUI and brightness in resume case
                if (link->connector_signal == SIGNAL_TYPE_EDP) {
                        dpcd_set_source_specific_data(link);
-                       dc_link_set_default_brightness_aux(link); //TODO: use cached
+                       dc_link_set_default_brightness_aux(link);
+                       //TODO: use cached
                }
 
                return true;
        }
 
-       if (false == dc_link_detect_sink(link, &new_connection_type)) {
+       if (!dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
 
        prev_sink = link->local_sink;
-       if (prev_sink != NULL) {
+       if (prev_sink) {
                dc_sink_retain(prev_sink);
                memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
        }
-       link_disconnect_sink(link);
 
+       link_disconnect_sink(link);
        if (new_connection_type != dc_connection_none) {
                link->type = new_connection_type;
                link->link_state_valid = false;
@@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link,
                }
 
                case SIGNAL_TYPE_DISPLAY_PORT: {
-
                        /* wa HPD high coming too early*/
                        if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
-
                                /* if alt mode times out, return false */
-                               if (wait_for_alt_mode(link) == false) {
+                               if (!wait_for_entering_dp_alt_mode(link))
                                        return false;
-                               }
                        }
 
-                       if (!detect_dp(
-                               link,
-                               &sink_caps,
-                               &converter_disable_audio,
-                               aud_support, reason)) {
-                               if (prev_sink != NULL)
+                       if (!detect_dp(link, &sink_caps,
+                                      &converter_disable_audio,
+                                      aud_support, reason)) {
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
                                return false;
                        }
 
                        // Check if dpcp block is the same
-                       if (prev_sink != NULL) {
-                               if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+                       if (prev_sink) {
+                               if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
+                                          sizeof(struct dpcd_caps)))
                                        same_dpcd = false;
                        }
                        /* Active dongle downstream unplug*/
                        if (link->type == dc_connection_active_dongle &&
-                               link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
-                               if (prev_sink != NULL)
+                           link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+                               if (prev_sink)
                                        /* Downstream unplug */
                                        dc_sink_release(prev_sink);
                                return true;
@@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
                        if (link->type == dc_connection_mst_branch) {
                                LINK_INFO("link=%d, mst branch is now Connected\n",
-                                       link->link_index);
+                                         link->link_index);
                                /* Need to setup mst link_cap struct here
                                 * otherwise dc_link_detect() will leave mst link_cap
                                 * empty which leads to allocate_mst_payload() has "0"
@@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
                                 */
                                dp_verify_mst_link_cap(link);
 
-                               if (prev_sink != NULL)
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
                                return false;
                        }
 
                        // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
                        if (reason == DETECT_REASON_BOOT &&
-                                       dc_ctx->dc->config.power_down_display_on_boot == false &&
-                                       link->link_status.link_active == true)
+                           !dc_ctx->dc->config.power_down_display_on_boot &&
+                           link->link_status.link_active)
                                perform_dp_seamless_boot = true;
 
                        if (perform_dp_seamless_boot) {
@@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
                default:
                        DC_ERROR("Invalid connector type! signal:%d\n",
-                               link->connector_signal);
-                       if (prev_sink != NULL)
+                                link->connector_signal);
+                       if (prev_sink)
                                dc_sink_release(prev_sink);
                        return false;
                } /* switch() */
 
                if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
-                       link->dpcd_sink_count = link->dpcd_caps.sink_count.
-                                       bits.SINK_COUNT;
+                       link->dpcd_sink_count =
+                               link->dpcd_caps.sink_count.bits.SINK_COUNT;
                else
                        link->dpcd_sink_count = 1;
 
-               dal_ddc_service_set_transaction_type(
-                                               link->ddc,
-                                               sink_caps.transaction_type);
+               dal_ddc_service_set_transaction_type(link->ddc,
+                                                    sink_caps.transaction_type);
 
-               link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
-                               link->ddc);
+               link->aux_mode =
+                       dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
 
                sink_init_data.link = link;
                sink_init_data.sink_signal = sink_caps.signal;
@@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
                sink = dc_sink_create(&sink_init_data);
                if (!sink) {
                        DC_ERROR("Failed to create sink!\n");
-                       if (prev_sink != NULL)
+                       if (prev_sink)
                                dc_sink_release(prev_sink);
                        return false;
                }
@@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                /* dc_sink_create returns a new reference */
                link->local_sink = sink;
 
-               edid_status = dm_helpers_read_local_edid(
-                               link->ctx,
-                               link,
-                               sink);
+               edid_status = dm_helpers_read_local_edid(link->ctx,
+                                                        link, sink);
 
                switch (edid_status) {
                case EDID_BAD_CHECKSUM:
@@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        break;
                case EDID_NO_RESPONSE:
                        DC_LOG_ERROR("No EDID read.\n");
-
                        /*
                         * Abort detection for non-DP connectors if we have
                         * no EDID
@@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
                         */
                        if (dc_is_hdmi_signal(link->connector_signal) ||
                            dc_is_dvi_signal(link->connector_signal)) {
-                               if (prev_sink != NULL)
+                               if (prev_sink)
                                        dc_sink_release(prev_sink);
 
                                return false;
@@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        link->ctx->dc->debug.disable_fec = true;
 
                // Check if edid is the same
-               if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
-                       same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+               if ((prev_sink) &&
+                   (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+                       same_edid = is_same_edid(&prev_sink->dc_edid,
+                                                &sink->dc_edid);
 
                if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
                        link->ctx->dc->debug.hdmi20_disable = true;
 
                if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-                       sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+                   sink_caps.transaction_type ==
+                   DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
                        /*
                         * TODO debug why Dell 2413 doesn't like
                         *  two link trainings
                         */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       query_hdcp_capability(sink->sink_signal, link);
+#endif
 
                        // verify link cap for SST non-seamless boot
                        if (!perform_dp_seamless_boot)
                                dp_verify_link_cap_with_retries(link,
-                                               &link->reported_link_cap,
-                                               LINK_TRAINING_MAX_VERIFY_RETRY);
+                                                               &link->reported_link_cap,
+                                                               LINK_TRAINING_MAX_VERIFY_RETRY);
                } else {
                        // If edid is the same, then discard new sink and revert back to original sink
                        if (same_edid) {
                                link_disconnect_remap(prev_sink, link);
                                sink = prev_sink;
                                prev_sink = NULL;
-
                        }
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+                       query_hdcp_capability(sink->sink_signal, link);
+#endif
                }
 
                /* HDMI-DVI Dongle */
                if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
-                               !sink->edid_caps.edid_hdmi)
+                   !sink->edid_caps.edid_hdmi)
                        sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 
                /* Connectivity log: detection */
                for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
                        CONN_DATA_DETECT(link,
-                                       &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
-                                       DC_EDID_BLOCK_SIZE,
-                                       "%s: [Block %d] ", sink->edid_caps.display_name, i);
+                                        &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+                                        DC_EDID_BLOCK_SIZE,
+                                        "%s: [Block %d] ", sink->edid_caps.display_name, i);
                }
 
                DC_LOG_DETECTION_EDID_PARSER("%s: "
@@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link,
                                sink->edid_caps.audio_modes[i].sample_rate,
                                sink->edid_caps.audio_modes[i].sample_size);
                }
-
        } else {
                /* From Connected-to-Disconnected. */
                if (link->type == dc_connection_mst_branch) {
                        LINK_INFO("link=%d, mst branch is now Disconnected\n",
-                               link->link_index);
+                                 link->link_index);
 
                        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                        link->mst_stream_alloc_table.stream_count = 0;
-                       memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+                       memset(link->mst_stream_alloc_table.stream_allocations,
+                              0,
+                              sizeof(link->mst_stream_alloc_table.stream_allocations));
                }
 
                link->type = dc_connection_none;
@@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
        }
 
        LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
-               link->link_index, sink,
-               (sink_caps.signal == SIGNAL_TYPE_NONE ?
-                       "Disconnected":"Connected"), prev_sink,
-                       same_dpcd, same_edid);
+                 link->link_index, sink,
+                 (sink_caps.signal ==
+                  SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+                 prev_sink, same_dpcd, same_edid);
 
-       if (prev_sink != NULL)
+       if (prev_sink)
                dc_sink_release(prev_sink);
 
        return true;
-
 }
 
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link)
        return state;
 }
 
-static enum hpd_source_id get_hpd_line(
-               struct dc_link *link)
+static enum hpd_source_id get_hpd_line(struct dc_link *link)
 {
        struct gpio *hpd;
        enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
 
-       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+       hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                          link->ctx->gpio_service);
 
        if (hpd) {
                switch (dal_irq_get_source(hpd)) {
@@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
        return channel;
 }
 
-static enum transmitter translate_encoder_to_transmitter(
-       struct graphics_object_id encoder)
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
 {
        switch (encoder.id) {
        case ENCODER_ID_INTERNAL_UNIPHY:
@@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter(
        }
 }
 
-static bool dc_link_construct(
-       struct dc_link *link,
-       const struct link_init_data *init_params)
+static bool dc_link_construct(struct dc_link *link,
+                             const struct link_init_data *init_params)
 {
        uint8_t i;
        struct ddc_service_init_data ddc_service_init_data = { { 0 } };
        struct dc_context *dc_ctx = init_params->ctx;
        struct encoder_init_data enc_init_data = { 0 };
+       struct panel_cntl_init_data panel_cntl_init_data = { 0 };
        struct integrated_info info = {{{ 0 }}};
        struct dc_bios *bios = init_params->dc->ctx->dc_bios;
        const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
        DC_LOGGER_INIT(dc_ctx->logger);
 
        link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
@@ -1278,23 +1375,27 @@ static bool dc_link_construct(
        link->ctx = dc_ctx;
        link->link_index = init_params->link_index;
 
-       memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
-       memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+       memset(&link->preferred_training_settings, 0,
+              sizeof(struct dc_link_training_overrides));
+       memset(&link->preferred_link_setting, 0,
+              sizeof(struct dc_link_settings));
 
-       link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+       link->link_id =
+               bios->funcs->get_connector_id(bios, init_params->connector_index);
 
        if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
                dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
-                        __func__, init_params->connector_index,
-                        link->link_id.type, OBJECT_TYPE_CONNECTOR);
+                                    __func__, init_params->connector_index,
+                                    link->link_id.type, OBJECT_TYPE_CONNECTOR);
                goto create_fail;
        }
 
        if (link->dc->res_pool->funcs->link_init)
                link->dc->res_pool->funcs->link_init(link);
 
-       link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-       if (link->hpd_gpio != NULL) {
+       link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+                                     link->ctx->gpio_service);
+       if (link->hpd_gpio) {
                dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
                dal_gpio_unlock_pin(link->hpd_gpio);
                link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
@@ -1314,9 +1415,9 @@ static bool dc_link_construct(
                link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        case CONNECTOR_ID_DISPLAY_PORT:
-               link->connector_signal =        SIGNAL_TYPE_DISPLAY_PORT;
+               link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
 
-               if (link->hpd_gpio != NULL)
+               if (link->hpd_gpio)
                        link->irq_source_hpd_rx =
                                        dal_irq_get_rx_source(link->hpd_gpio);
 
@@ -1324,42 +1425,60 @@ static bool dc_link_construct(
        case CONNECTOR_ID_EDP:
                link->connector_signal = SIGNAL_TYPE_EDP;
 
-               if (link->hpd_gpio != NULL) {
+               if (link->hpd_gpio) {
                        link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
                        link->irq_source_hpd_rx =
                                        dal_irq_get_rx_source(link->hpd_gpio);
                }
+
                break;
        case CONNECTOR_ID_LVDS:
                link->connector_signal = SIGNAL_TYPE_LVDS;
                break;
        default:
-               DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+               DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+                              link->link_id.id);
                goto create_fail;
        }
 
        /* TODO: #DAL3 Implement id to str function.*/
        LINK_INFO("Connector[%d] description:"
-                       "signal %d\n",
-                       init_params->connector_index,
-                       link->connector_signal);
+                 "signal %d\n",
+                 init_params->connector_index,
+                 link->connector_signal);
 
        ddc_service_init_data.ctx = link->ctx;
        ddc_service_init_data.id = link->link_id;
        ddc_service_init_data.link = link;
        link->ddc = dal_ddc_service_create(&ddc_service_init_data);
 
-       if (link->ddc == NULL) {
+       if (!link->ddc) {
                DC_ERROR("Failed to create ddc_service!\n");
                goto ddc_create_fail;
        }
 
        link->ddc_hw_inst =
-               dal_ddc_get_line(
-                       dal_ddc_service_get_ddc_pin(link->ddc));
+               dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+
+
+       if (link->dc->res_pool->funcs->panel_cntl_create &&
+               (link->link_id.id == CONNECTOR_ID_EDP ||
+                       link->link_id.id == CONNECTOR_ID_LVDS)) {
+               panel_cntl_init_data.ctx = dc_ctx;
+               panel_cntl_init_data.inst = 0;
+               link->panel_cntl =
+                       link->dc->res_pool->funcs->panel_cntl_create(
+                                                               &panel_cntl_init_data);
+
+               if (link->panel_cntl == NULL) {
+                       DC_ERROR("Failed to create link panel_cntl!\n");
+                       goto panel_cntl_create_fail;
+               }
+       }
 
        enc_init_data.ctx = dc_ctx;
-       bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+       bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+                             &enc_init_data.encoder);
        enc_init_data.connector = link->link_id;
        enc_init_data.channel = get_ddc_line(link);
        enc_init_data.hpd_source = get_hpd_line(link);
@@ -1367,11 +1486,11 @@ static bool dc_link_construct(
        link->hpd_src = enc_init_data.hpd_source;
 
        enc_init_data.transmitter =
-                       translate_encoder_to_transmitter(enc_init_data.encoder);
-       link->link_enc = link->dc->res_pool->funcs->link_enc_create(
-                                                               &enc_init_data);
+               translate_encoder_to_transmitter(enc_init_data.encoder);
+       link->link_enc =
+               link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
 
-       if (link->link_enc == NULL) {
+       if (!link->link_enc) {
                DC_ERROR("Failed to create link encoder!\n");
                goto link_enc_create_fail;
        }
@@ -1379,8 +1498,9 @@ static bool dc_link_construct(
        link->link_enc_hw_inst = link->link_enc->transmitter;
 
        for (i = 0; i < 4; i++) {
-               if (BP_RESULT_OK !=
-                               bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+               if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+                                            link->link_id, i,
+                                            &link->device_tag) != BP_RESULT_OK) {
                        DC_ERROR("Failed to find device tag!\n");
                        goto device_tag_fail;
                }
@@ -1388,13 +1508,14 @@ static bool dc_link_construct(
                /* Look for device tag that matches connector signal,
                 * CRT for rgb, LCD for other supported signal tyes
                 */
-               if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+               if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+                                                     link->device_tag.dev_id))
                        continue;
-               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
-                       && link->connector_signal != SIGNAL_TYPE_RGB)
+               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+                   link->connector_signal != SIGNAL_TYPE_RGB)
                        continue;
-               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
-                       && link->connector_signal == SIGNAL_TYPE_RGB)
+               if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+                   link->connector_signal == SIGNAL_TYPE_RGB)
                        continue;
                break;
        }
@@ -1406,16 +1527,16 @@ static bool dc_link_construct(
        for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
                struct external_display_path *path =
                        &info.ext_disp_conn_info.path[i];
-               if (path->device_connector_id.enum_id == link->link_id.enum_id
-                       && path->device_connector_id.id == link->link_id.id
-                       && path->device_connector_id.type == link->link_id.type) {
 
-                       if (link->device_tag.acpi_device != 0
-                               && path->device_acpi_enum == link->device_tag.acpi_device) {
+               if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+                   path->device_connector_id.id == link->link_id.id &&
+                   path->device_connector_id.type == link->link_id.type) {
+                       if (link->device_tag.acpi_device != 0 &&
+                           path->device_acpi_enum == link->device_tag.acpi_device) {
                                link->ddi_channel_mapping = path->channel_mapping;
                                link->chip_caps = path->caps;
                        } else if (path->device_tag ==
-                                       link->device_tag.dev_id.raw_device_tag) {
+                                  link->device_tag.dev_id.raw_device_tag) {
                                link->ddi_channel_mapping = path->channel_mapping;
                                link->chip_caps = path->caps;
                        }
@@ -1431,15 +1552,20 @@ static bool dc_link_construct(
         */
        program_hpd_filter(link);
 
+       link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
        return true;
 device_tag_fail:
        link->link_enc->funcs->destroy(&link->link_enc);
 link_enc_create_fail:
+       if (link->panel_cntl != NULL)
+               link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
        dal_ddc_service_destroy(&link->ddc);
 ddc_create_fail:
 create_fail:
 
-       if (link->hpd_gpio != NULL) {
+       if (link->hpd_gpio) {
                dal_gpio_destroy_irq(&link->hpd_gpio);
                link->hpd_gpio = NULL;
        }
@@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing(
        return DC_OK;
 }
 
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+       int i;
+       struct dc *dc = link->ctx->dc;
+       struct abm *abm = NULL;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+               struct dc_stream_state *stream = pipe_ctx.stream;
+
+               if (stream && stream->link == link) {
+                       abm = pipe_ctx.stream_res.abm;
+                       break;
+               }
+       }
+       return abm;
+}
+
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-       struct abm *abm = link->ctx->dc->res_pool->abm;
+
+       struct abm *abm = get_abm_from_stream_res(link);
 
        if (abm == NULL || abm->funcs->get_current_backlight == NULL)
                return DC_ERROR_UNEXPECTED;
@@ -2349,71 +2494,63 @@ int dc_link_get_backlight_level(const struct dc_link *link)
        return (int) abm->funcs->get_current_backlight(abm);
 }
 
-bool dc_link_set_backlight_level(const struct dc_link *link,
-               uint32_t backlight_pwm_u16_16,
-               uint32_t frame_ramp)
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
 {
-       struct dc  *dc = link->ctx->dc;
-       struct abm *abm = dc->res_pool->abm;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-       unsigned int controller_id = 0;
-       bool use_smooth_brightness = true;
-       int i;
-       DC_LOGGER_INIT(link->ctx->logger);
+       struct abm *abm = get_abm_from_stream_res(link);
 
-       if ((dmcu == NULL) ||
-               (abm == NULL) ||
-               (abm->funcs->set_backlight_level_pwm == NULL))
-               return false;
+       if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+               return DC_ERROR_UNEXPECTED;
 
-       use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+       return (int) abm->funcs->get_target_backlight(abm);
+}
 
-       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-                       backlight_pwm_u16_16, backlight_pwm_u16_16);
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+       int i;
+       struct dc *dc = link->ctx->dc;
+       struct pipe_ctx *pipe_ctx = NULL;
 
-       if (dc_is_embedded_signal(link->connector_signal)) {
-               for (i = 0; i < MAX_PIPES; i++) {
-                       if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
-                               if (dc->current_state->res_ctx.
-                                               pipe_ctx[i].stream->link
-                                               == link) {
-                                       /* DMCU -1 for all controller id values,
-                                        * therefore +1 here
-                                        */
-                                       controller_id =
-                                               dc->current_state->
-                                               res_ctx.pipe_ctx[i].stream_res.tg->inst +
-                                               1;
-
-                                       /* Disable brightness ramping when the display is blanked
-                                        * as it can hang the DMCU
-                                        */
-                                       if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
-                                               frame_ramp = 0;
-                               }
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+                       if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+                               pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+                               break;
                        }
                }
-               abm->funcs->set_backlight_level_pwm(
-                               abm,
-                               backlight_pwm_u16_16,
-                               frame_ramp,
-                               controller_id,
-                               use_smooth_brightness);
        }
 
-       return true;
+       return pipe_ctx;
 }
 
-bool dc_link_set_abm_disable(const struct dc_link *link)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp)
 {
        struct dc  *dc = link->ctx->dc;
-       struct abm *abm = dc->res_pool->abm;
 
-       if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
-               return false;
+       DC_LOGGER_INIT(link->ctx->logger);
+       DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+                       backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+       if (dc_is_embedded_signal(link->connector_signal)) {
+               struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
 
-       abm->funcs->set_abm_immediate_disable(abm);
+               if (pipe_ctx) {
+                       /* Disable brightness ramping when the display is blanked
+                        * as it can hang the DMCU
+                        */
+                       if (pipe_ctx->plane_state == NULL)
+                               frame_ramp = 0;
+               } else {
+                       ASSERT(false);
+                       return false;
+               }
 
+               dc->hwss.set_backlight_level(
+                               pipe_ctx,
+                               backlight_pwm_u16_16,
+                               frame_ramp);
+       }
        return true;
 }
 
@@ -2423,12 +2560,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dmub_psr *psr = dc->res_pool->psr;
 
-       if (psr != NULL && link->psr_feature_enabled)
+       if (psr != NULL && link->psr_settings.psr_feature_enabled)
                psr->funcs->psr_enable(psr, allow_active);
-       else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+       else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
 
-       link->psr_allow_active = allow_active;
+       link->psr_settings.psr_allow_active = allow_active;
 
        return true;
 }
@@ -2439,9 +2576,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dmub_psr *psr = dc->res_pool->psr;
 
-       if (psr != NULL && link->psr_feature_enabled)
+       if (psr != NULL && link->psr_settings.psr_feature_enabled)
                psr->funcs->psr_get_state(psr, psr_state);
-       else if (dmcu != NULL && link->psr_feature_enabled)
+       else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->get_psr_state(dmcu, psr_state);
 
        return true;
@@ -2612,14 +2749,14 @@ bool dc_link_setup_psr(struct dc_link *link,
        psr_context->frame_delay = 0;
 
        if (psr)
-               link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
+               link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
        else
-               link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+               link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
 
        /* psr_enabled == 0 indicates setup_psr did not succeed, but this
         * should not happen since firmware should be running at this point
         */
-       if (link->psr_feature_enabled == 0)
+       if (link->psr_settings.psr_feature_enabled == 0)
                ASSERT(0);
 
        return true;
@@ -2966,7 +3103,7 @@ void core_link_enable_stream(
        enum dc_status status;
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+       if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
@@ -3040,6 +3177,18 @@ void core_link_enable_stream(
                if (pipe_ctx->stream->dpms_off)
                        return;
 
+               /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+                * link training). This is to make sure the bandwidth sent to DIG BE won't be
+                * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+                * will be automatically set at a later time when the video is enabled
+                * (DP_VID_STREAM_EN = 1).
+                */
+               if (pipe_ctx->stream->timing.flags.DSC) {
+                       if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+                                       dc_is_virtual_signal(pipe_ctx->stream->signal))
+                               dp_set_dsc_enable(pipe_ctx, true);
+               }
+
                status = enable_link(state, pipe_ctx);
 
                if (status != DC_OK) {
@@ -3067,11 +3216,6 @@ void core_link_enable_stream(
                                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                        COLOR_DEPTH_UNDEFINED);
 
-               if (pipe_ctx->stream->timing.flags.DSC) {
-                       if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
-                                       dc_is_virtual_signal(pipe_ctx->stream->signal))
-                               dp_set_dsc_enable(pipe_ctx, true);
-               }
                dc->hwss.enable_stream(pipe_ctx);
 
                /* Set DPS PPS SDP (AKA "info frames") */
@@ -3101,6 +3245,10 @@ void core_link_enable_stream(
                        dp_set_dsc_enable(pipe_ctx, true);
 
        }
+
+       if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               core_link_set_avmute(pipe_ctx, false);
+       }
 }
 
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -3109,10 +3257,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->sink->link;
 
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+       if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
+       if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               core_link_set_avmute(pipe_ctx, true);
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
        update_psp_stream_config(pipe_ctx, true);
 #endif
index 256889eed93e37dc0ee49de0b68a0055058a18cd..aefd29a440b52060781753360b97cb2d61711b4c 100644 (file)
@@ -599,7 +599,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
        do {
                struct aux_payload current_payload;
                bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
-                       payload->length ? true : false;
+                       payload->length;
 
                current_payload.address = payload->address;
                current_payload.data = &payload->data[retrieved];
index caa090d0b6acc6549186f96a850edbf60ad46343..91cd884d6f25712cde81d7384a62700f85f0f740 100644 (file)
@@ -13,7 +13,6 @@
 #include "core_status.h"
 #include "dpcd_defs.h"
 
-#include "resource.h"
 #define DC_LOGGER \
        link->ctx->logger
 
@@ -1737,19 +1736,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
 
 static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 {
-       /* Set Default link settings */
-       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
-                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
-       /* Higher link settings based on feature supported */
-       if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH2;
-
-       if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH3;
+       struct dc_link_settings max_link_cap = {0};
 
-       if (link->link_enc->funcs->get_max_link_cap)
-               link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+       /* get max link encoder capability */
+       link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
 
        /* Lower link settings based on sink's link cap */
        if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2453,7 +2443,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
 {
        union dpcd_psr_configuration psr_configuration;
 
-       if (!link->psr_feature_enabled)
+       if (!link->psr_settings.psr_feature_enabled)
                return false;
 
        dm_helpers_dp_read_dpcd(
@@ -2557,7 +2547,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
        /* get phy test pattern and pattern parameters from DP receiver */
        core_link_read_dpcd(
                        link,
-                       DP_TEST_PHY_PATTERN,
+                       DP_PHY_TEST_PATTERN,
                        &dpcd_test_pattern.raw,
                        sizeof(dpcd_test_pattern));
        core_link_read_dpcd(
@@ -4267,7 +4257,7 @@ void dpcd_set_source_specific_data(struct dc_link *link)
 {
        const uint32_t post_oui_delay = 30; // 30ms
        uint8_t dspc = 0;
-       enum dc_status ret = DC_ERROR_UNEXPECTED;
+       enum dc_status ret;
 
        ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
                                  sizeof(dspc));
index 51e0ee6e769507f04e7f8e54c734963a0cef6626..6590f51caefabb9d9b4749539638646e2179f8b3 100644 (file)
@@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
        struct dc_stream_state *stream = pipe_ctx->stream;
        bool result = false;
 
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+       if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
                result = true;
        else
                result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
index f4bcc71b2920c396cac6ff55186893c5720cdb69..0c5619364e7db97715b37c034032f0ce066eaf1a 100644 (file)
@@ -532,6 +532,24 @@ static inline void get_vp_scan_direction(
                *flip_horz_scan_dir = !*flip_horz_scan_dir;
 }
 
+int get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+       int mpc_split_count = 0;
+       struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+       while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+               mpc_split_count++;
+               other_pipe = other_pipe->bottom_pipe;
+       }
+       other_pipe = pipe->top_pipe;
+       while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+               mpc_split_count++;
+               other_pipe = other_pipe->top_pipe;
+       }
+
+       return mpc_split_count;
+}
+
 int get_num_odm_splits(struct pipe_ctx *pipe)
 {
        int odm_split_count = 0;
@@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
                /*Check for mpc split*/
                struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
 
+               *split_count = get_num_mpc_splits(pipe_ctx);
                while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
                        (*split_idx)++;
-                       (*split_count)++;
                        split_pipe = split_pipe->top_pipe;
                }
-               split_pipe = pipe_ctx->bottom_pipe;
-               while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
-                       (*split_count)++;
-                       split_pipe = split_pipe->bottom_pipe;
-               }
        } else {
                /*Get odm split index*/
                struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@@ -692,6 +705,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
        /* Round up, assume original video size always even dimensions */
        data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
        data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+       data->viewport_unadjusted = data->viewport;
+       data->viewport_c_unadjusted = data->viewport_c;
 }
 
 static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -1061,8 +1077,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        calculate_viewport(pipe_ctx);
 
-       if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
-               pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+       if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
+               pipe_ctx->plane_res.scl_data.viewport.width < 12) {
                if (store_h_border_left) {
                        restore_border_left_from_dst(pipe_ctx,
                                store_h_border_left);
@@ -1358,9 +1374,6 @@ bool dc_add_plane_to_context(
        dc_plane_state_retain(plane_state);
 
        while (head_pipe) {
-               tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
-               ASSERT(tail_pipe);
-
                free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
 
        #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1378,6 +1391,8 @@ bool dc_add_plane_to_context(
                free_pipe->plane_state = plane_state;
 
                if (head_pipe != free_pipe) {
+                       tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+                       ASSERT(tail_pipe);
                        free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
                        free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
                        free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1545,35 +1560,6 @@ bool dc_add_all_planes_for_stream(
        return add_all_planes_for_stream(dc, stream, &set, 1, context);
 }
 
-
-static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
-       struct dc_stream_state *new_stream)
-{
-       if (cur_stream == NULL)
-               return true;
-
-       if (memcmp(&cur_stream->hdr_static_metadata,
-                       &new_stream->hdr_static_metadata,
-                       sizeof(struct dc_info_packet)) != 0)
-               return true;
-
-       return false;
-}
-
-static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
-               struct dc_stream_state *new_stream)
-{
-       if (cur_stream == NULL)
-               return true;
-
-       if (memcmp(&cur_stream->vsc_infopacket,
-                       &new_stream->vsc_infopacket,
-                       sizeof(struct dc_info_packet)) != 0)
-               return true;
-
-       return false;
-}
-
 static bool is_timing_changed(struct dc_stream_state *cur_stream,
                struct dc_stream_state *new_stream)
 {
@@ -1608,15 +1594,9 @@ static bool are_stream_backends_same(
        if (is_timing_changed(stream_a, stream_b))
                return false;
 
-       if (is_hdr_static_meta_changed(stream_a, stream_b))
-               return false;
-
        if (stream_a->dpms_off != stream_b->dpms_off)
                return false;
 
-       if (is_vsc_info_packet_changed(stream_a, stream_b))
-               return false;
-
        return true;
 }
 
@@ -1756,21 +1736,6 @@ static struct audio *find_first_free_audio(
        return 0;
 }
 
-bool resource_is_stream_unchanged(
-       struct dc_state *old_context, struct dc_stream_state *stream)
-{
-       int i;
-
-       for (i = 0; i < old_context->stream_count; i++) {
-               struct dc_stream_state *old_stream = old_context->streams[i];
-
-               if (are_stream_backends_same(old_stream, stream))
-                               return true;
-       }
-
-       return false;
-}
-
 /**
  * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
  */
@@ -2025,17 +1990,6 @@ enum dc_status resource_map_pool_resources(
        int pipe_idx = -1;
        struct dc_bios *dcb = dc->ctx->dc_bios;
 
-       /* TODO Check if this is needed */
-       /*if (!resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                               stream->bit_depth_params =
-                                               old_context->streams[i]->bit_depth_params;
-                               stream->clamping = old_context->streams[i]->clamping;
-                               continue;
-                       }
-               }
-       */
-
        calculate_phy_pix_clks(stream);
 
        /* TODO: Check Linux */
@@ -2718,19 +2672,16 @@ bool pipe_need_reprogram(
        if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
                return true;
 
-       if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
-               return true;
-
        if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
                return true;
 
-       if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
-               return true;
-
        if (false == pipe_ctx_old->stream->link->link_state_valid &&
                false == pipe_ctx_old->stream->dpms_off)
                return true;
 
+       if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
+               return true;
+
        return false;
 }
 
index a249a0e5edd0ff6b67894af5b557c561ac8f6200..9e16af22e4aafc4867b91967ef330dc840314283 100644 (file)
@@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da
        sink->ctx = link->ctx;
        sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
        sink->converter_disable_audio = init_params->converter_disable_audio;
+       sink->is_mst_legacy = init_params->sink_is_legacy;
        sink->dc_container_id = NULL;
        sink->sink_id = init_params->link->ctx->dc_sink_id_count;
        // increment dc_sink_id_count because we don't want two sinks with same ID
index 1935cf6601ebd03bb6dbd06689343fe82d9efe1a..85908561c7418a4a60ea1a0bfc44d87b5c395f5f 100644 (file)
@@ -29,6 +29,9 @@
 #include "dc_types.h"
 #include "grph_object_defs.h"
 #include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
 #include "gpio_types.h"
 #include "link_service_types.h"
 #include "grph_object_ctrl_defs.h"
@@ -39,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.76"
+#define DC_VER "3.2.84"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -95,6 +98,49 @@ struct dc_plane_cap {
        } max_downscale_factor;
 };
 
+// Color management caps (DPP and MPC)
+struct rom_curve_caps {
+       uint16_t srgb : 1;
+       uint16_t bt2020 : 1;
+       uint16_t gamma2_2 : 1;
+       uint16_t pq : 1;
+       uint16_t hlg : 1;
+};
+
+struct dpp_color_caps {
+       uint16_t dcn_arch : 1; // all DCE generations treated the same
+       // input lut is different than most LUTs, just plain 256-entry lookup
+       uint16_t input_lut_shared : 1; // shared with DGAM
+       uint16_t icsc : 1;
+       uint16_t dgam_ram : 1;
+       uint16_t post_csc : 1; // before gamut remap
+       uint16_t gamma_corr : 1;
+
+       // hdr_mult and gamut remap always available in DPP (in that order)
+       // 3d lut implies shaper LUT,
+       // it may be shared with MPC - check MPC:shared_3d_lut flag
+       uint16_t hw_3d_lut : 1;
+       uint16_t ogam_ram : 1; // blnd gam
+       uint16_t ocsc : 1;
+       struct rom_curve_caps dgam_rom_caps;
+       struct rom_curve_caps ogam_rom_caps;
+};
+
+struct mpc_color_caps {
+       uint16_t gamut_remap : 1;
+       uint16_t ogam_ram : 1;
+       uint16_t ocsc : 1;
+       uint16_t num_3dluts : 3; //3d lut always assumes a preceding shaper LUT
+       uint16_t shared_3d_lut:1; //can be in either DPP or MPC, but single instance
+
+       struct rom_curve_caps ogam_rom_caps;
+};
+
+struct dc_color_caps {
+       struct dpp_color_caps dpp;
+       struct mpc_color_caps mpc;
+};
+
 struct dc_caps {
        uint32_t max_streams;
        uint32_t max_links;
@@ -117,9 +163,9 @@ struct dc_caps {
        bool psp_setup_panel_mode;
        bool extended_aux_timeout_support;
        bool dmcub_support;
-       bool hw_3d_lut;
        enum dp_protocol_version max_dp_protocol_version;
        struct dc_plane_cap planes[MAX_PLANES];
+       struct dc_color_caps color;
 };
 
 struct dc_bug_wa {
@@ -230,7 +276,8 @@ struct dc_config {
        bool forced_clocks;
        bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
        bool multi_mon_pp_mclk_switch;
-       bool psr_on_dmub;
+       bool disable_dmcu;
+       bool enable_4to1MPC;
 };
 
 enum visual_confirm {
@@ -238,6 +285,7 @@ enum visual_confirm {
        VISUAL_CONFIRM_SURFACE = 1,
        VISUAL_CONFIRM_HDR = 2,
        VISUAL_CONFIRM_MPCTREE = 4,
+       VISUAL_CONFIRM_PSR = 5,
 };
 
 enum dcc_option {
@@ -429,6 +477,7 @@ struct dc_debug_options {
        bool enable_dmcub_surface_flip;
        bool usbc_combo_phy_reset_wa;
        bool disable_dsc;
+       bool enable_dram_clock_change_one_display_vactive;
 };
 
 struct dc_debug_data {
@@ -474,6 +523,7 @@ struct dc_bounding_box_overrides {
        int urgent_latency_ns;
        int percent_of_ideal_drambw;
        int dram_clock_change_latency_ns;
+       int dummy_clock_change_latency_ns;
        /* This forces a hard min on the DCFCLK we use
         * for DML.  Unlike the debug option for forcing
         * DCFCLK, this override affects watermark calculations
@@ -987,6 +1037,7 @@ struct dpcd_caps {
        union dpcd_fec_capability fec_cap;
        struct dpcd_dsc_capabilities dsc_caps;
        struct dc_lttpr_caps lttpr_caps;
+       struct psr_caps psr_caps;
 
 };
 
@@ -1004,6 +1055,35 @@ union dpcd_sink_ext_caps {
        uint8_t raw;
 };
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+       struct {
+               uint8_t version;
+               uint8_t reserved;
+               struct {
+                       uint8_t repeater        : 1;
+                       uint8_t hdcp_capable    : 1;
+                       uint8_t reserved        : 6;
+               } byte0;
+       } fields;
+       uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+       struct {
+               uint8_t HDCP_CAPABLE:1;
+               uint8_t REPEATER:1;
+               uint8_t RESERVED:6;
+       } bits;
+       uint8_t raw;
+};
+
+struct hdcp_caps {
+       union hdcp_rx_caps rx_caps;
+       union hdcp_bcaps bcaps;
+};
+#endif
+
 #include "dc_link.h"
 
 /*******************************************************************************
@@ -1046,7 +1126,7 @@ struct dc_sink {
        void *priv;
        struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
        bool converter_disable_audio;
-
+       bool is_mst_legacy;
        struct dc_sink_dsc_caps dsc_caps;
        struct dc_sink_fec_caps fec_caps;
 
@@ -1073,6 +1153,7 @@ struct dc_sink_init_data {
        struct dc_link *link;
        uint32_t dongle_max_pix_clk;
        bool converter_disable_audio;
+       bool sink_is_legacy;
 };
 
 struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
@@ -1104,9 +1185,16 @@ void dc_set_power_state(
                struct dc *dc,
                enum dc_acpi_cm_power_state power_state);
 void dc_resume(struct dc *dc);
-unsigned int dc_get_current_backlight_pwm(struct dc *dc);
-unsigned int dc_get_target_backlight_pwm(struct dc *dc);
 
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+/*
+ * HDCP Interfaces
+ */
+enum hdcp_message_status dc_process_hdcp_msg(
+               enum signal_type signal,
+               struct dc_link *link,
+               struct hdcp_protection_message *message_info);
+#endif
 bool dc_is_dmcu_initialized(struct dc *dc);
 
 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
index 59c298a6484f396c3872dcb9d6ca32b6baa12a32..eea2429ac67d8a93273e65d1820456795d692ca4 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "../dmub/inc/dmub_srv.h"
+#include "../dmub/dmub_srv.h"
 
 static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
                                  struct dmub_srv *dmub)
@@ -58,7 +58,7 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
 }
 
 void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
-                          struct dmub_cmd_header *cmd)
+                          union dmub_rb_cmd *cmd)
 {
        struct dmub_srv *dmub = dc_dmub_srv->dmub;
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
index 754b6077539cf8a0b8f6ff3778106cc7d288f6b3..a3a09ccb6d266c20514eb30b8a03834e840737ca 100644 (file)
 #define _DMUB_DC_SRV_H_
 
 #include "os_types.h"
-#include "../dmub/inc/dmub_cmd.h"
+#include "dmub/dmub_srv.h"
 
 struct dmub_srv;
-struct dmub_cmd_header;
 
 struct dc_reg_helper_state {
        bool gather_in_progress;
@@ -49,7 +48,7 @@ struct dc_dmub_srv {
 };
 
 void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
-                          struct dmub_cmd_header *cmd);
+                          union dmub_rb_cmd *cmd);
 
 void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
 
index bb2730e9521ed3320f8de0ee8239cdad97f53445..af177c087d3b489057f6b30a13dd725a1dd21897 100644 (file)
@@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities {
        union dpcd_dsc_ext_capabilities dsc_ext_caps;
 };
 
+/* These parameters are from PSR capabilities reported by Sink DPCD */
+struct psr_caps {
+       unsigned char psr_version;
+       unsigned int psr_rfb_setup_time;
+       bool psr_exit_link_training_required;
+};
 
 #endif /* DC_DP_TYPES_H */
index 737048d8a96c16cf2bf2b89763e243a80d2af8db..85a0170be5449158e03c3b997b4cb788b54320e4 100644 (file)
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
 
@@ -73,7 +73,7 @@ static inline void submit_dmub_burst_write(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
 
@@ -92,7 +92,7 @@ static inline void submit_dmub_reg_wait(
        gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
        ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
 
-       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+       dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
 
        memset(cmd_buf, 0, sizeof(*cmd_buf));
        offload->reg_seq_count = 0;
index 00ff5e98278c2f731957a08fcf0c4bb32ca5ed2e..f63fc25aa6c5475bad8adf3df8a36f4b356e1bc2 100644 (file)
@@ -66,6 +66,22 @@ struct time_stamp {
 struct link_trace {
        struct time_stamp time_stamp;
 };
+
+/* PSR feature flags */
+struct psr_settings {
+       bool psr_feature_enabled;               // PSR is supported by sink
+       bool psr_allow_active;                  // PSR is currently active
+       enum dc_psr_version psr_version;                // Internal PSR version, determined based on DPCD
+
+       /* These parameters are calculated in Driver,
+        * based on display timing and Sink capabilities.
+        * If VBLANK region is too small and Sink takes a long time
+        * to set up RFB, it may take an extra frame to enter PSR state.
+        */
+       bool psr_frame_capture_indication_req;
+       unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
 /*
  * A link contains one or more sinks and their connected status.
  * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -118,6 +134,7 @@ struct dc_link {
 
        struct dc_context *ctx;
 
+       struct panel_cntl *panel_cntl;
        struct link_encoder *link_enc;
        struct graphics_object_id link_id;
        union ddi_channel_mapping ddi_channel_mapping;
@@ -126,11 +143,14 @@ struct dc_link {
        uint32_t dongle_max_pix_clk;
        unsigned short chip_caps;
        unsigned int dpcd_sink_count;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+       struct hdcp_caps hdcp_caps;
+#endif
        enum edp_revision edp_revision;
-       bool psr_feature_enabled;
-       bool psr_allow_active;
        union dpcd_sink_ext_caps dpcd_sink_ext_caps;
 
+       struct psr_settings psr_settings;
+
        /* MST record stream using this link */
        struct link_flags {
                bool dp_keep_receiver_powered;
@@ -197,7 +217,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
 
-bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
 
 bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
 
@@ -290,6 +310,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
  * DPCD access interfaces
  */
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+bool dc_link_is_hdcp14(struct dc_link *link);
+bool dc_link_is_hdcp22(struct dc_link *link);
+#endif
 void dc_link_set_drive_settings(struct dc *dc,
                                struct link_training_settings *lt_settings,
                                const struct dc_link *link);
index a5c7ef47b8d3c7d680ebe54e70c273323c2c8a6d..49aad691e687e6d91cf85131bc1322b2b3cc18b5 100644 (file)
@@ -167,8 +167,6 @@ struct dc_stream_state {
 
        /* TODO: custom INFO packets */
        /* TODO: ABM info (DMCU) */
-       /* PSR info */
-       unsigned char psr_version;
        /* TODO: CEA VIC */
 
        /* DMCU info */
index 0d210104ba0a10bf66cabd196b0e305dbf4c2e18..f236da1c1859e0c4229cda26bd897ac73116e9c2 100644 (file)
@@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
        uint32_t branch_max_line_width;
 };
 
+enum dc_psr_version {
+       DC_PSR_VERSION_1                        = 0,
+       DC_PSR_VERSION_UNSUPPORTED              = 0xFFFFFFFF,
+};
+
 #endif /* DC_TYPES_H_ */
index fbfcff700971cae06a816eb2463d9b555a018df1..f704a8fd52e81bcbf0875581c33fa2b0db525685 100644 (file)
@@ -29,7 +29,7 @@
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
 dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
 
index b8a3fc505c9b6e1a881209bd9569a8e262b92a19..4e87e70237e3da3cb551a1365d481d8f22564f15 100644 (file)
@@ -55,7 +55,7 @@
 
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
-static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
        uint32_t rampingBoundary = 0xFFFF;
@@ -83,125 +83,12 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
        return true;
 }
 
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
-{
-       uint64_t current_backlight;
-       uint32_t round_result;
-       uint32_t pwm_period_cntl, bl_period, bl_int_count;
-       uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
-       uint32_t bl_period_mask, bl_pwm_mask;
-
-       pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
-       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
-       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
-
-       bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
-       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
-       REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
-
-       if (bl_int_count == 0)
-               bl_int_count = 16;
-
-       bl_period_mask = (1 << bl_int_count) - 1;
-       bl_period &= bl_period_mask;
-
-       bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
-
-       if (fractional_duty_cycle_en == 0)
-               bl_pwm &= bl_pwm_mask;
-       else
-               bl_pwm &= 0xFFFF;
-
-       current_backlight = bl_pwm << (1 + bl_int_count);
-
-       if (bl_period == 0)
-               bl_period = 0xFFFF;
-
-       current_backlight = div_u64(current_backlight, bl_period);
-       current_backlight = (current_backlight + 1) >> 1;
-
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
-       return (uint32_t)(current_backlight);
-}
-
-static void driver_set_backlight_level(struct dce_abm *abm_dce,
-               uint32_t backlight_pwm_u16_16)
-{
-       uint32_t backlight_16bit;
-       uint32_t masked_pwm_period;
-       uint8_t bit_count;
-       uint64_t active_duty_cycle;
-       uint32_t pwm_period_bitcnt;
-
-       /*
-        * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
-        * active duty cycle <= backlight period
-        */
-
-       /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
-        */
-       REG_GET_2(BL_PWM_PERIOD_CNTL,
-                       BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
-                       BL_PWM_PERIOD, &masked_pwm_period);
-
-       if (pwm_period_bitcnt == 0)
-               bit_count = 16;
-       else
-               bit_count = pwm_period_bitcnt;
-
-       /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
-       masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
-
-       /* 1.2 Calculate integer active duty cycle required upper 16 bits
-        * contain integer component, lower 16 bits contain fractional component
-        * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
-        */
-       active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
-
-       /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
-        * components shift by bitCount then mask 16 bits and add rounding bit
-        * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
-        */
-       backlight_16bit = active_duty_cycle >> bit_count;
-       backlight_16bit &= 0xFFFF;
-       backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
-
-       /*
-        * 2. Program register with updated value
-        */
-
-       /* 2.1 Lock group 2 backlight registers */
-
-       REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
-                       BL_PWM_GRP1_REG_LOCK, 1);
-
-       // 2.2 Write new active duty cycle
-       REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
-
-       /* 2.3 Unlock group 2 backlight registers */
-       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_LOCK, 0);
-
-       /* 3 Wait for pending bit to be cleared */
-       REG_WAIT(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
-                       1, 10000);
-}
-
 static void dmcu_set_backlight_level(
        struct dce_abm *abm_dce,
        uint32_t backlight_pwm_u16_16,
        uint32_t frame_ramp,
-       uint32_t controller_id)
+       uint32_t controller_id,
+       uint32_t panel_id)
 {
        unsigned int backlight_8_bit = 0;
        uint32_t s2;
@@ -213,7 +100,7 @@ static void dmcu_set_backlight_level(
                // Take MSB of fractional part since backlight is not max
                backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
 
-       dce_abm_set_pipe(&abm_dce->base, controller_id);
+       dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id);
 
        /* waitDMCUReadyForCmd */
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -248,10 +135,9 @@ static void dmcu_set_backlight_level(
                        0, 1, 80000);
 }
 
-static void dce_abm_init(struct abm *abm)
+static void dce_abm_init(struct abm *abm, uint32_t backlight)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-       unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
 
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
        REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -331,86 +217,12 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
        return true;
 }
 
-static bool dce_abm_immediate_disable(struct abm *abm)
+static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
 {
-       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-
        if (abm->dmcu_is_running == false)
                return true;
 
-       dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
-
-       abm->stored_backlight_registers.BL_PWM_CNTL =
-               REG_READ(BL_PWM_CNTL);
-       abm->stored_backlight_registers.BL_PWM_CNTL2 =
-               REG_READ(BL_PWM_CNTL2);
-       abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
-               REG_READ(BL_PWM_PERIOD_CNTL);
-
-       REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
-               &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-       return true;
-}
-
-static bool dce_abm_init_backlight(struct abm *abm)
-{
-       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-       uint32_t value;
-
-       /* It must not be 0, so we have to restore them
-        * Bios bug w/a - period resets to zero,
-        * restoring to cache values which is always correct
-        */
-       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
-       if (value == 0 || value == 1) {
-               if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
-                       REG_WRITE(BL_PWM_CNTL,
-                               abm->stored_backlight_registers.BL_PWM_CNTL);
-                       REG_WRITE(BL_PWM_CNTL2,
-                               abm->stored_backlight_registers.BL_PWM_CNTL2);
-                       REG_WRITE(BL_PWM_PERIOD_CNTL,
-                               abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
-                       REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
-                               BL_PWM_REF_DIV,
-                               abm->stored_backlight_registers.
-                               LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-               } else {
-                       /* TODO: Note: This should not really happen since VBIOS
-                        * should have initialized PWM registers on boot.
-                        */
-                       REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
-                       REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
-               }
-       } else {
-               abm->stored_backlight_registers.BL_PWM_CNTL =
-                               REG_READ(BL_PWM_CNTL);
-               abm->stored_backlight_registers.BL_PWM_CNTL2 =
-                               REG_READ(BL_PWM_CNTL2);
-               abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
-                               REG_READ(BL_PWM_PERIOD_CNTL);
-
-               REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
-                               &abm->stored_backlight_registers.
-                               LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
-       }
-
-       /* Have driver take backlight control
-        * TakeBacklightControl(true)
-        */
-       value = REG_READ(BIOS_SCRATCH_2);
-       value |= ATOM_S2_VRI_BRIGHT_ENABLE;
-       REG_WRITE(BIOS_SCRATCH_2, value);
-
-       /* Enable the backlight output */
-       REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
-
-       /* Disable fractional pwm if configured */
-       REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
-                  abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
-
-       /* Unlock group 2 backlight registers */
-       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
-                       BL_PWM_GRP1_REG_LOCK, 0);
+       dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
 
        return true;
 }
@@ -420,21 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
                unsigned int backlight_pwm_u16_16,
                unsigned int frame_ramp,
                unsigned int controller_id,
-               bool use_smooth_brightness)
+               unsigned int panel_inst)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
        DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
                        backlight_pwm_u16_16, backlight_pwm_u16_16);
 
-       /* If DMCU is in reset state, DMCU is uninitialized */
-       if (use_smooth_brightness)
-               dmcu_set_backlight_level(abm_dce,
-                               backlight_pwm_u16_16,
-                               frame_ramp,
-                               controller_id);
-       else
-               driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
+       dmcu_set_backlight_level(abm_dce,
+                       backlight_pwm_u16_16,
+                       frame_ramp,
+                       controller_id,
+                       panel_inst);
 
        return true;
 }
@@ -442,12 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
 static const struct abm_funcs dce_funcs = {
        .abm_init = dce_abm_init,
        .set_abm_level = dce_abm_set_level,
-       .init_backlight = dce_abm_init_backlight,
        .set_pipe = dce_abm_set_pipe,
        .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
        .get_current_backlight = dce_abm_get_current_backlight,
        .get_target_backlight = dce_abm_get_target_backlight,
-       .set_abm_immediate_disable = dce_abm_immediate_disable
+       .init_abm_config = NULL,
+       .set_abm_immediate_disable = dce_abm_immediate_disable,
 };
 
 static void dce_abm_construct(
@@ -461,10 +270,6 @@ static void dce_abm_construct(
 
        base->ctx = ctx;
        base->funcs = &dce_funcs;
-       base->stored_backlight_registers.BL_PWM_CNTL = 0;
-       base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
-       base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
-       base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
        base->dmcu_is_running = false;
 
        abm_dce->regs = regs;
index ba0caaffa24bf37add2e3b654388dfc5df6d40c7..9718a4823372c12c47042498d577bf51b3bfbeba 100644 (file)
 #include "abm.h"
 
 #define ABM_COMMON_REG_LIST_DCE_BASE() \
-       SR(BL_PWM_PERIOD_CNTL), \
-       SR(BL_PWM_CNTL), \
-       SR(BL_PWM_CNTL2), \
-       SR(BL_PWM_GRP1_REG_LOCK), \
-       SR(LVTMA_PWRSEQ_REF_DIV), \
        SR(MASTER_COMM_CNTL_REG), \
        SR(MASTER_COMM_CMD_REG), \
        SR(MASTER_COMM_DATA_REG1)
        .field_name = reg_name ## __ ## field_name ## post_fix
 
 #define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
-       ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
-       ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
-       ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
-       ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
-       ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
        ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
        ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
        ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
        type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
        type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
        type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
-       type BL_PWM_PERIOD; \
-       type BL_PWM_PERIOD_BITCNT; \
-       type BL_ACTIVE_INT_FRAC_CNT; \
-       type BL_PWM_FRACTIONAL_EN; \
        type MASTER_COMM_INTERRUPT; \
        type MASTER_COMM_CMD_REG_BYTE0; \
        type MASTER_COMM_CMD_REG_BYTE1; \
-       type MASTER_COMM_CMD_REG_BYTE2; \
-       type BL_PWM_REF_DIV; \
-       type BL_PWM_EN; \
-       type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
-       type BL_PWM_GRP1_REG_LOCK; \
-       type BL_PWM_GRP1_REG_UPDATE_PENDING
+       type MASTER_COMM_CMD_REG_BYTE2
 
 struct dce_abm_shift {
        ABM_REG_FIELD_LIST(uint8_t);
@@ -201,10 +178,6 @@ struct dce_abm_mask {
 };
 
 struct dce_abm_registers {
-       uint32_t BL_PWM_PERIOD_CNTL;
-       uint32_t BL_PWM_CNTL;
-       uint32_t BL_PWM_CNTL2;
-       uint32_t LVTMA_PWRSEQ_REF_DIV;
        uint32_t DC_ABM1_HG_SAMPLE_RATE;
        uint32_t DC_ABM1_LS_SAMPLE_RATE;
        uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@@ -219,7 +192,6 @@ struct dce_abm_registers {
        uint32_t MASTER_COMM_CMD_REG;
        uint32_t MASTER_COMM_DATA_REG1;
        uint32_t BIOS_SCRATCH_2;
-       uint32_t BL_PWM_GRP1_REG_LOCK;
 };
 
 struct dce_abm {
index 2e992fbc0d71844f0d0008cf5afc76cc19aef242..d2ad0504b0de10370768615fabec9318b1490ccb 100644 (file)
@@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry {
        unsigned short div_factor;
 };
 
-static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
-       // /1.001 rates
-       {25170, 25180, 25200, 1000, 1001},      //25.2MHz   ->   25.17
-       {59340, 59350, 59400, 1000, 1001},      //59.4Mhz   ->   59.340
-       {74170, 74180, 74250, 1000, 1001},      //74.25Mhz  ->   74.1758
-       {125870, 125880, 126000, 1000, 1001},   //126Mhz    ->  125.87
-       {148350, 148360, 148500, 1000, 1001},   //148.5Mhz  ->  148.3516
-       {167830, 167840, 168000, 1000, 1001},   //168Mhz    ->  167.83
-       {222520, 222530, 222750, 1000, 1001},   //222.75Mhz ->  222.527
-       {257140, 257150, 257400, 1000, 1001},   //257.4Mhz  ->  257.1429
-       {296700, 296710, 297000, 1000, 1001},   //297Mhz    ->  296.7033
-       {342850, 342860, 343200, 1000, 1001},   //343.2Mhz  ->  342.857
-       {395600, 395610, 396000, 1000, 1001},   //396Mhz    ->  395.6
-       {409090, 409100, 409500, 1000, 1001},   //409.5Mhz  ->  409.091
-       {445050, 445060, 445500, 1000, 1001},   //445.5Mhz  ->  445.055
-       {467530, 467540, 468000, 1000, 1001},   //468Mhz    ->  467.5325
-       {519230, 519240, 519750, 1000, 1001},   //519.75Mhz ->  519.231
-       {525970, 525980, 526500, 1000, 1001},   //526.5Mhz  ->  525.974
-       {545450, 545460, 546000, 1000, 1001},   //546Mhz    ->  545.455
-       {593400, 593410, 594000, 1000, 1001},   //594Mhz    ->  593.4066
-       {623370, 623380, 624000, 1000, 1001},   //624Mhz    ->  623.377
-       {692300, 692310, 693000, 1000, 1001},   //693Mhz    ->  692.308
-       {701290, 701300, 702000, 1000, 1001},   //702Mhz    ->  701.2987
-       {791200, 791210, 792000, 1000, 1001},   //792Mhz    ->  791.209
-       {890100, 890110, 891000, 1000, 1001},   //891Mhz    ->  890.1099
-       {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz   -> 1186.8131
-
-       // *1.001 rates
-       {27020, 27030, 27000, 1001, 1000}, //27Mhz
-       {54050, 54060, 54000, 1001, 1000}, //54Mhz
-       {108100, 108110, 108000, 1001, 1000},//108Mhz
-};
-
 static bool dcn20_program_pix_clk(
                struct clock_source *clock_source,
                struct pixel_clk_params *pix_clk_params,
index c5aa1f48593a6eba67b2019b31be21253492bc70..5479d959ec6269805c000476e3899f737945340b 100644 (file)
 
 #include "dc_types.h"
 
-#define BL_REG_LIST()\
-       SR(LVTMA_PWRSEQ_CNTL), \
-       SR(LVTMA_PWRSEQ_STATE)
-
 #define HWSEQ_DCEF_REG_LIST_DCE8() \
        .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
        .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
        SRII(BLND_CONTROL, BLND, 0),\
        SRII(BLND_CONTROL, BLND, 1),\
        SR(BLNDV_CONTROL),\
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_DCE8_REG_LIST() \
        HWSEQ_DCEF_REG_LIST_DCE8(), \
        HWSEQ_BLND_REG_LIST(), \
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_DCE10_REG_LIST() \
        HWSEQ_DCEF_REG_LIST(), \
        HWSEQ_BLND_REG_LIST(), \
-       HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
-       BL_REG_LIST()
+       HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
 
 #define HWSEQ_ST_REG_LIST() \
        HWSEQ_DCE11_REG_LIST_BASE(), \
        SR(DCHUB_FB_LOCATION),\
        SR(DCHUB_AGP_BASE),\
        SR(DCHUB_AGP_BOT),\
-       SR(DCHUB_AGP_TOP), \
-       BL_REG_LIST()
+       SR(DCHUB_AGP_TOP)
 
 #define HWSEQ_VG20_REG_LIST() \
        HWSEQ_DCE120_REG_LIST(),\
 #define HWSEQ_DCE112_REG_LIST() \
        HWSEQ_DCE10_REG_LIST(), \
        HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
-       HWSEQ_PHYPLL_REG_LIST(CRTC), \
-       BL_REG_LIST()
+       HWSEQ_PHYPLL_REG_LIST(CRTC)
 
 #define HWSEQ_DCN_REG_LIST()\
        SR(REFCLK_CNTL), \
        SR(D3VGA_CONTROL), \
        SR(D4VGA_CONTROL), \
        SR(VGA_TEST_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 #define HWSEQ_DCN2_REG_LIST()\
        HWSEQ_DCN_REG_LIST(), \
        SR(D4VGA_CONTROL), \
        SR(D5VGA_CONTROL), \
        SR(D6VGA_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 #define HWSEQ_DCN21_REG_LIST()\
        HWSEQ_DCN_REG_LIST(), \
        SR(D4VGA_CONTROL), \
        SR(D5VGA_CONTROL), \
        SR(D6VGA_CONTROL), \
-       SR(DC_IP_REQUEST_CNTL), \
-       BL_REG_LIST()
+       SR(DC_IP_REQUEST_CNTL)
 
 struct dce_hwseq_registers {
-
-               /* Backlight registers */
-       uint32_t LVTMA_PWRSEQ_CNTL;
-       uint32_t LVTMA_PWRSEQ_STATE;
-
        uint32_t DCFE_CLOCK_CONTROL[6];
        uint32_t DCFEV_CLOCK_CONTROL;
        uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
@@ -465,26 +448,18 @@ struct dce_hwseq_registers {
        HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
        HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
 
-#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
-       HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-
 #define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
        .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
        HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
-       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
 
 #define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
        HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
-       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
 
 #define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
@@ -507,8 +482,7 @@ struct dce_hwseq_registers {
        HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
        HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
        HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
-       HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
 
 #define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -570,8 +544,7 @@ struct dce_hwseq_registers {
        HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
        HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
        HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
-       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
 
 #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -630,8 +603,7 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
-       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
 
 #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -671,10 +643,7 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
-       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
-       HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
-       HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
-       HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+       HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
 
 #define HWSEQ_REG_FIELD_LIST(type) \
        type DCFE_CLOCK_ENABLE; \
@@ -706,11 +675,7 @@ struct dce_hwseq_registers {
        type PF_LFB_REGION;\
        type PF_MAX_REGION;\
        type ENABLE_L1_TLB;\
-       type SYSTEM_ACCESS_MODE;\
-       type LVTMA_BLON;\
-       type LVTMA_DIGON;\
-       type LVTMA_DIGON_OVRD;\
-       type LVTMA_PWRSEQ_TARGET_STATE_R;
+       type SYSTEM_ACCESS_MODE;
 
 #define HWSEQ_DCN_REG_FIELD_LIST(type) \
        type HUBP_VTG_SEL; \
index 8527cce81c6fb8f315d7d88f7cfa2ea0fa43bc5a..8d8c84c81b34e2105c0a320d9122ed5094beddc5 100644 (file)
@@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
        .enable_hpd = dce110_link_encoder_enable_hpd,
        .disable_hpd = dce110_link_encoder_disable_hpd,
        .is_dig_enabled = dce110_is_dig_enabled,
-       .destroy = dce110_link_encoder_destroy
+       .destroy = dce110_link_encoder_destroy,
+       .get_max_link_cap = dce110_link_encoder_get_max_link_cap
 };
 
 static enum bp_result link_transmitter_control(
@@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
 
        set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
 }
+
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+       /* Higher link settings based on feature supported */
+       if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+       if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+       *link_settings = max_link_cap;
+}
index 3c9368df4093ac0f52177fb8a733f244c1f804d6..384389f0e2c313c1feb9e4e65176989d89266396 100644 (file)
@@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
 
 bool dce110_is_dig_enabled(struct link_encoder *enc);
 
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 #endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
new file mode 100644 (file)
index 0000000..ebff9b1
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "panel_cntl.h"
+#include "dce_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCE_PANEL_CNTL(panel_cntl)\
+       container_of(panel_cntl, struct dce_panel_cntl, base)
+
+#define CTX \
+       dce_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+       dce_panel_cntl->base.ctx->logger
+
+#define REG(reg)\
+       dce_panel_cntl->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+       dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
+
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+{
+       uint64_t current_backlight;
+       uint32_t round_result;
+       uint32_t pwm_period_cntl, bl_period, bl_int_count;
+       uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+       uint32_t bl_period_mask, bl_pwm_mask;
+
+       pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+       REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+       bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+       REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+       if (bl_int_count == 0)
+               bl_int_count = 16;
+
+       bl_period_mask = (1 << bl_int_count) - 1;
+       bl_period &= bl_period_mask;
+
+       bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+       if (fractional_duty_cycle_en == 0)
+               bl_pwm &= bl_pwm_mask;
+       else
+               bl_pwm &= 0xFFFF;
+
+       current_backlight = bl_pwm << (1 + bl_int_count);
+
+       if (bl_period == 0)
+               bl_period = 0xFFFF;
+
+       current_backlight = div_u64(current_backlight, bl_period);
+       current_backlight = (current_backlight + 1) >> 1;
+
+       current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+       round_result = (round_result >> (bl_int_count-1)) & 1;
+
+       current_backlight >>= bl_int_count;
+       current_backlight += round_result;
+
+       return (uint32_t)(current_backlight);
+}
+
+uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t value;
+       uint32_t current_backlight;
+
+       /* It must not be 0, so we have to restore them
+        * Bios bug w/a - period resets to zero,
+        * restoring to cache values which is always correct
+        */
+       REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+
+       if (value == 0 || value == 1) {
+               if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
+                       REG_WRITE(BL_PWM_CNTL,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
+                       REG_WRITE(BL_PWM_CNTL2,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
+                       REG_WRITE(BL_PWM_PERIOD_CNTL,
+                                       panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+                       REG_UPDATE(PWRSEQ_REF_DIV,
+                               BL_PWM_REF_DIV,
+                               panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+               } else {
+                       /* TODO: Note: This should not really happen since VBIOS
+                        * should have initialized PWM registers on boot.
+                        */
+                       REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+                       REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+               }
+       } else {
+               panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+                               REG_READ(BL_PWM_CNTL);
+               panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+                               REG_READ(BL_PWM_CNTL2);
+               panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+                               REG_READ(BL_PWM_PERIOD_CNTL);
+
+               REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+                               &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+       }
+
+       // Have driver take backlight control
+       // TakeBacklightControl(true)
+       value = REG_READ(BIOS_SCRATCH_2);
+       value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+       REG_WRITE(BIOS_SCRATCH_2, value);
+
+       // Enable the backlight output
+       REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+       // Unlock group 2 backlight registers
+       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_LOCK, 0);
+
+       current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+
+       return current_backlight;
+}
+
+bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t value;
+
+       REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+       return value;
+}
+
+bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+       uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+       REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+       REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+       return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
+}
+
+void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+       panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+               REG_READ(BL_PWM_CNTL);
+       panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+               REG_READ(BL_PWM_CNTL2);
+       panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+               REG_READ(BL_PWM_PERIOD_CNTL);
+
+       REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+               &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+}
+
+void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+               uint32_t backlight_pwm_u16_16)
+{
+       uint32_t backlight_16bit;
+       uint32_t masked_pwm_period;
+       uint8_t bit_count;
+       uint64_t active_duty_cycle;
+       uint32_t pwm_period_bitcnt;
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+       /*
+        * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
+        * active duty cycle <= backlight period
+        */
+
+       /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
+        */
+       REG_GET_2(BL_PWM_PERIOD_CNTL,
+                       BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+                       BL_PWM_PERIOD, &masked_pwm_period);
+
+       if (pwm_period_bitcnt == 0)
+               bit_count = 16;
+       else
+               bit_count = pwm_period_bitcnt;
+
+       /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+       masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+       /* 1.2 Calculate integer active duty cycle required upper 16 bits
+        * contain integer component, lower 16 bits contain fractional component
+        * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+        */
+       active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
+
+       /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
+        * components shift by bitCount then mask 16 bits and add rounding bit
+        * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+        */
+       backlight_16bit = active_duty_cycle >> bit_count;
+       backlight_16bit &= 0xFFFF;
+       backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+       /*
+        * 2. Program register with updated value
+        */
+
+       /* 2.1 Lock group 2 backlight registers */
+
+       REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+                       BL_PWM_GRP1_REG_LOCK, 1);
+
+       // 2.2 Write new active duty cycle
+       REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+       /* 2.3 Unlock group 2 backlight registers */
+       REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_LOCK, 0);
+
+       /* 3 Wait for pending bit to be cleared */
+       REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+                       BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+                       1, 10000);
+}
+
+static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+       struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
+
+       kfree(dce_panel_cntl);
+       *panel_cntl = NULL;
+}
+
+static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
+       .destroy = dce_panel_cntl_destroy,
+       .hw_init = dce_panel_cntl_hw_init,
+       .is_panel_backlight_on = dce_is_panel_backlight_on,
+       .is_panel_powered_on = dce_is_panel_powered_on,
+       .store_backlight_level = dce_store_backlight_level,
+       .driver_set_backlight = dce_driver_set_backlight,
+};
+
+void dce_panel_cntl_construct(
+       struct dce_panel_cntl *dce_panel_cntl,
+       const struct panel_cntl_init_data *init_data,
+       const struct dce_panel_cntl_registers *regs,
+       const struct dce_panel_cntl_shift *shift,
+       const struct dce_panel_cntl_mask *mask)
+{
+       struct panel_cntl *base = &dce_panel_cntl->base;
+
+       base->stored_backlight_registers.BL_PWM_CNTL = 0;
+       base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+       base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+       base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+
+       dce_panel_cntl->regs = regs;
+       dce_panel_cntl->shift = shift;
+       dce_panel_cntl->mask = mask;
+
+       dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
+       dce_panel_cntl->base.ctx = init_data->ctx;
+       dce_panel_cntl->base.inst = init_data->inst;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
new file mode 100644 (file)
index 0000000..70ec691
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCE_H__
+#define __DC_PANEL_CNTL__DCE_H__
+
+#include "panel_cntl.h"
+
+/* set register offset with instance */
+#define DCE_PANEL_CNTL_SR(reg_name, block)\
+       .reg_name = mm ## block ## _ ## reg_name
+
+#define DCE_PANEL_CNTL_REG_LIST()\
+       DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+       SR(BL_PWM_CNTL), \
+       SR(BL_PWM_CNTL2), \
+       SR(BL_PWM_PERIOD_CNTL), \
+       SR(BL_PWM_GRP1_REG_LOCK), \
+       SR(BIOS_SCRATCH_2)
+
+#define DCN_PANEL_CNTL_SR(reg_name, block)\
+       .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
+                                       mm ## block ## _ ## reg_name
+
+#define DCN_PANEL_CNTL_REG_LIST()\
+       DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+       DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+       DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+       SR(BL_PWM_CNTL), \
+       SR(BL_PWM_CNTL2), \
+       SR(BL_PWM_PERIOD_CNTL), \
+       SR(BL_PWM_GRP1_REG_LOCK), \
+       SR(BIOS_SCRATCH_2)
+
+#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+       .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+       DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
+
+#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
+       type LVTMA_BLON;\
+       type LVTMA_DIGON;\
+       type LVTMA_DIGON_OVRD;\
+       type LVTMA_PWRSEQ_TARGET_STATE_R; \
+       type BL_PWM_REF_DIV; \
+       type BL_PWM_EN; \
+       type BL_ACTIVE_INT_FRAC_CNT; \
+       type BL_PWM_FRACTIONAL_EN; \
+       type BL_PWM_PERIOD; \
+       type BL_PWM_PERIOD_BITCNT; \
+       type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+       type BL_PWM_GRP1_REG_LOCK; \
+       type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_panel_cntl_shift {
+       DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_panel_cntl_mask {
+       DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_panel_cntl_registers {
+       uint32_t PWRSEQ_CNTL;
+       uint32_t PWRSEQ_STATE;
+       uint32_t BL_PWM_CNTL;
+       uint32_t BL_PWM_CNTL2;
+       uint32_t BL_PWM_PERIOD_CNTL;
+       uint32_t BL_PWM_GRP1_REG_LOCK;
+       uint32_t PWRSEQ_REF_DIV;
+       uint32_t BIOS_SCRATCH_2;
+};
+
+struct dce_panel_cntl {
+       struct panel_cntl base;
+       const struct dce_panel_cntl_registers *regs;
+       const struct dce_panel_cntl_shift *shift;
+       const struct dce_panel_cntl_mask *mask;
+};
+
+void dce_panel_cntl_construct(
+       struct dce_panel_cntl *panel_cntl,
+       const struct panel_cntl_init_data *init_data,
+       const struct dce_panel_cntl_registers *regs,
+       const struct dce_panel_cntl_shift *shift,
+       const struct dce_panel_cntl_mask *mask);
+
+#endif /* __DC_PANEL_CNTL__DCE_H__ */
index 451574971b9641327742611311ba4ecd9d991ce6..4cdaaf4d881cc42122d597ca71e3fbcb5068b8ac 100644 (file)
@@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
 {
        struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
 
-       uint32_t speakers = 0;
        uint32_t channels = 0;
 
        ASSERT(audio_info);
@@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
                /* This should not happen.it does so we don't get BSOD*/
                return;
 
-       speakers = audio_info->flags.info.ALLSPEAKERS;
        channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
 
        /* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
new file mode 100644 (file)
index 0000000..da0b29a
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#include "atom.h"
+
+#define TO_DMUB_ABM(abm)\
+       container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+       (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+       dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+       dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+       uint32_t ramping_boundary = 0xFFFF;
+
+       cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+       cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+       cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+       cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+       cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static void dmcub_set_backlight_level(
+       struct dce_abm *dce_abm,
+       uint32_t backlight_pwm_u16_16,
+       uint32_t frame_ramp,
+       uint32_t otg_inst,
+       uint32_t panel_inst)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = dce_abm->base.ctx;
+       unsigned int backlight_8_bit = 0;
+       uint32_t s2;
+
+       if (backlight_pwm_u16_16 & 0x10000)
+               // Check for max backlight condition
+               backlight_8_bit = 0xFF;
+       else
+               // Take MSB of fractional part since backlight is not max
+               backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
+       dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
+
+       REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
+
+       if (otg_inst == 0)
+               frame_ramp = 0;
+
+       cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+       cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+       cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       // Update requested backlight level
+       s2 = REG_READ(BIOS_SCRATCH_2);
+
+       s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+       backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+                               ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+       s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+       REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+       union dmub_rb_cmd cmd;
+       uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+
+       cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+       cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+       cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+       REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+       REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+       REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+       REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+       REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+       REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+                       ABM1_HG_NUM_OF_BINS_SEL, 0,
+                       ABM1_HG_VMAX_SEL, 1,
+                       ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+       REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+                       ABM1_IPCSC_COEFF_SEL_R, 2,
+                       ABM1_IPCSC_COEFF_SEL_G, 4,
+                       ABM1_IPCSC_COEFF_SEL_B, 2);
+
+       REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+                       BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+       REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+                       BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+       REG_UPDATE(BL1_PWM_USER_LEVEL,
+                       BL1_PWM_USER_LEVEL, backlight);
+
+       REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+                       ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+                       ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+       REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+                       ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+                       ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+                       ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+       dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+       unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+       /* return backlight in hardware format which is unsigned 17 bits, with
+        * 1 bit integer and 16 bit fractional
+        */
+       return backlight;
+}
+
+static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+       unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+       /* return backlight in hardware format which is unsigned 17 bits, with
+        * 1 bit integer and 16 bit fractional
+        */
+       return backlight;
+}
+
+static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+
+       cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+       cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+       cmd.abm_set_level.abm_set_level_data.level = level;
+       cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
+{
+       dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
+
+       return true;
+}
+
+static bool dmub_abm_set_backlight_level_pwm(
+               struct abm *abm,
+               unsigned int backlight_pwm_u16_16,
+               unsigned int frame_ramp,
+               unsigned int otg_inst,
+               uint32_t panel_inst)
+{
+       struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+       dmcub_set_backlight_level(dce_abm,
+                       backlight_pwm_u16_16,
+                       frame_ramp,
+                       otg_inst,
+                       panel_inst);
+
+       return true;
+}
+
+static bool dmub_abm_init_config(struct abm *abm,
+       const char *src,
+       unsigned int bytes)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = abm->ctx;
+
+       // TODO: Optimize by only reading back final 4 bytes
+       dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+       // Copy iramtable into cw7
+       memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+       // Fw will copy from cw7 to fw_state
+       cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+       cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+       cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+       cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+       cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       return true;
+}
+
+static const struct abm_funcs abm_funcs = {
+       .abm_init = dmub_abm_init,
+       .set_abm_level = dmub_abm_set_level,
+       .set_pipe = dmub_abm_set_pipe,
+       .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
+       .get_current_backlight = dmub_abm_get_current_backlight,
+       .get_target_backlight = dmub_abm_get_target_backlight,
+       .set_abm_immediate_disable = dmub_abm_immediate_disable,
+       .init_abm_config = dmub_abm_init_config,
+};
+
+static void dmub_abm_construct(
+       struct dce_abm *abm_dce,
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask)
+{
+       struct abm *base = &abm_dce->base;
+
+       base->ctx = ctx;
+       base->funcs = &abm_funcs;
+       base->dmcu_is_running = false;
+
+       abm_dce->regs = regs;
+       abm_dce->abm_shift = abm_shift;
+       abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dmub_abm_create(
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask)
+{
+       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+       if (abm_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+       return &abm_dce->base;
+}
+
+void dmub_abm_destroy(struct abm **abm)
+{
+       struct dce_abm *abm_dce = TO_DMUB_ABM(*abm);
+
+       kfree(abm_dce);
+       *abm = NULL;
+}
similarity index 75%
rename from drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
rename to drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
index 26583f346c3957d74da49e75ce4ed677a5958b40..3a5d5ac7a86eccc42d20304ceb296e1bade02740 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  *
  */
 
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
+#ifndef __DMUB_ABM_H__
+#define __DMUB_ABM_H__
 
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
-       int i;
+#include "abm.h"
+#include "dce_abm.h"
 
-       if (hex_data)
-               for (i = 0; i < hex_data_count; i++)
-                       DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
+struct abm *dmub_abm_create(
+       struct dc_context *ctx,
+       const struct dce_abm_registers *regs,
+       const struct dce_abm_shift *abm_shift,
+       const struct dce_abm_mask *abm_mask);
 
+void dmub_abm_destroy(struct abm **abm);
+
+#endif
index bc109d4fc6e6bfcbed02821c0d2b93b4e5aaee77..044a0133ebb13b1d82067e7013889c6c86c0db98 100644 (file)
 #include "dmub_psr.h"
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "../../dmub/inc/dmub_srv.h"
-#include "../../dmub/inc/dmub_gpint_cmd.h"
+#include "dmub/dmub_srv.h"
 #include "core_types.h"
 
 #define MAX_PIPES 6
 
+/**
+ * Convert dmcub psr state to dmcu psr state.
+ */
+static void convert_psr_state(uint32_t *psr_state)
+{
+       if (*psr_state == 0)
+               *psr_state = 0;
+       else if (*psr_state == 0x10)
+               *psr_state = 1;
+       else if (*psr_state == 0x11)
+               *psr_state = 2;
+       else if (*psr_state == 0x20)
+               *psr_state = 3;
+       else if (*psr_state == 0x21)
+               *psr_state = 4;
+       else if (*psr_state == 0x30)
+               *psr_state = 5;
+       else if (*psr_state == 0x31)
+               *psr_state = 6;
+       else if (*psr_state == 0x40)
+               *psr_state = 7;
+       else if (*psr_state == 0x41)
+               *psr_state = 8;
+       else if (*psr_state == 0x42)
+               *psr_state = 9;
+       else if (*psr_state == 0x43)
+               *psr_state = 10;
+       else if (*psr_state == 0x44)
+               *psr_state = 11;
+       else if (*psr_state == 0x50)
+               *psr_state = 12;
+       else if (*psr_state == 0x51)
+               *psr_state = 13;
+       else if (*psr_state == 0x52)
+               *psr_state = 14;
+       else if (*psr_state == 0x53)
+               *psr_state = 15;
+}
+
 /**
  * Get PSR state from firmware.
  */
@@ -43,6 +81,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
        dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
 
        dmub_srv_get_gpint_response(srv, psr_state);
+
+       convert_psr_state(psr_state);
 }
 
 /**
@@ -53,19 +93,23 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
        union dmub_rb_cmd cmd;
        struct dc_context *dc = dmub->ctx;
 
+       if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+               return false;
+
        cmd.psr_set_version.header.type = DMUB_CMD__PSR;
        cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
-
-       if (stream->psr_version == 0x0) // Unsupported
-               return false;
-       else if (stream->psr_version == 0x1)
+       switch (stream->link->psr_settings.psr_version) {
+       case DC_PSR_VERSION_1:
                cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
-       else if (stream->psr_version == 0x2)
-               cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
-
-       cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+               break;
+       case DC_PSR_VERSION_UNSUPPORTED:
+       default:
+               cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+               break;
+       }
+       cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 
@@ -89,7 +133,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
 
        cmd.psr_enable.header.payload_bytes = 0; // Send header only
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
@@ -113,7 +157,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
        cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
        cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 }
@@ -162,7 +206,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
 
        // Hw insts
-       copy_settings_data->dpphy_inst                          = psr_context->phyType;
+       copy_settings_data->dpphy_inst                          = psr_context->transmitterId;
        copy_settings_data->aux_inst                            = psr_context->channel;
        copy_settings_data->digfe_inst                          = psr_context->engineId;
        copy_settings_data->digbe_inst                          = psr_context->transmitterId;
@@ -187,8 +231,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        copy_settings_data->smu_optimizations_en                = psr_context->allow_smu_optimizations;
        copy_settings_data->frame_delay                         = psr_context->frame_delay;
        copy_settings_data->frame_cap_ind                       = psr_context->psrFrameCaptureIndicationReq;
+       copy_settings_data->debug.visual_confirm                = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
+                                                                       true : false;
 
-       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+       dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
 
index 8f78bf9abbca1b070b8af6c217bc6967d1886cd9..a28c4ae0f2599062db4c7e74d7245e31ec79ca0d 100644 (file)
@@ -46,6 +46,7 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
 #include "dce100/dce100_hw_sequencer.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define opp_regs(id)\
 [id] = {\
        OPP_DCE_100_REG_LIST(id),\
@@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct output_pixel_processor *dce100_opp_create(
        struct dc_context *ctx,
        uint32_t inst)
@@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
 static const struct resource_funcs dce100_res_pool_funcs = {
        .destroy = dce100_destroy_resource_pool,
        .link_enc_create = dce100_link_encoder_create,
+       .panel_cntl_create = dce100_panel_cntl_create,
        .validate_bandwidth = dce100_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
index 10527593868cc91031c48031c9d5532f4f0d5dd0..b77e9dc1608634ca9ad7329e5ecd5081234b990e 100644 (file)
@@ -53,6 +53,7 @@
 #include "abm.h"
 #include "audio.h"
 #include "reg_helper.h"
+#include "panel_cntl.h"
 
 /* include DCE11 register header files */
 #include "dce/dce_11_0_d.h"
@@ -695,31 +696,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 
 
 
-}
-
-/*todo: cloned in stream enc, fix*/
-bool dce110_is_panel_backlight_on(struct dc_link *link)
-{
-       struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
-       uint32_t value;
-
-       REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
-
-       return value;
-}
-
-bool dce110_is_panel_powered_on(struct dc_link *link)
-{
-       struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
-       uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
-       REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
-
-       REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
-
-       return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
 }
 
 static enum bp_result link_transmitter_control(
@@ -810,7 +786,6 @@ void dce110_edp_power_control(
                bool power_up)
 {
        struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hwseq = ctx->dc->hwseq;
        struct bp_transmitter_control cntl = { 0 };
        enum bp_result bp_result;
 
@@ -821,7 +796,11 @@ void dce110_edp_power_control(
                return;
        }
 
-       if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
+       if (!link->panel_cntl)
+               return;
+
+       if (power_up !=
+               link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
                /* Send VBIOS command to prompt eDP panel power */
                if (power_up) {
                        unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -892,7 +871,6 @@ void dce110_edp_backlight_control(
                bool enable)
 {
        struct dc_context *ctx = link->ctx;
-       struct dce_hwseq *hws = ctx->dc->hwseq;
        struct bp_transmitter_control cntl = { 0 };
 
        if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -901,7 +879,8 @@ void dce110_edp_backlight_control(
                return;
        }
 
-       if (enable && hws->funcs.is_panel_backlight_on(link)) {
+       if (enable && link->panel_cntl &&
+               link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) {
                DC_LOG_HW_RESUME_S3(
                                "%s: panel already powered up. Do nothing.\n",
                                __func__);
@@ -1087,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
 
        if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                hws->funcs.edp_backlight_control(link, false);
-               dc_link_set_abm_disable(link);
+               link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
        }
 
        if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 
        pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
 
-       pipe_ctx->stream->link->psr_feature_enabled = false;
+       pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
 
        return DC_OK;
 }
@@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc,
                return false;
 
        /* PSR should not be enabled */
-       if (pipe_ctx->stream->link->psr_feature_enabled)
+       if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
                return false;
 
        /* Nothing to compress */
@@ -2376,6 +2355,7 @@ static void init_hw(struct dc *dc)
        struct abm *abm;
        struct dmcu *dmcu;
        struct dce_hwseq *hws = dc->hwseq;
+       uint32_t backlight = MAX_BACKLIGHT_LEVEL;
 
        bp = dc->ctx->dc_bios;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2422,12 +2402,17 @@ static void init_hw(struct dc *dc)
                audio->funcs->hw_init(audio);
        }
 
-       abm = dc->res_pool->abm;
-       if (abm != NULL) {
-               abm->funcs->init_backlight(abm);
-               abm->funcs->abm_init(abm);
+       for (i = 0; i < dc->link_count; i++) {
+               struct dc_link *link = dc->links[i];
+
+               if (link->panel_cntl)
+                       backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
        }
 
+       abm = dc->res_pool->abm;
+       if (abm != NULL)
+               abm->funcs->abm_init(abm, backlight);
+
        dmcu = dc->res_pool->dmcu;
        if (dmcu != NULL && abm != NULL)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@@ -2735,6 +2720,53 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
                                pipe_ctx->plane_res.xfm, attributes);
 }
 
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp)
+{
+       struct dc_link *link = pipe_ctx->stream->link;
+       struct dc  *dc = link->ctx->dc;
+       struct abm *abm = pipe_ctx->stream_res.abm;
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
+       /* DMCU -1 for all controller id values,
+        * therefore +1 here
+        */
+       uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
+
+       if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
+               return false;
+
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+       if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
+               panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
+       else
+               abm->funcs->set_backlight_level_pwm(
+                               abm,
+                               backlight_pwm_u16_16,
+                               frame_ramp,
+                               controller_id,
+                               link->panel_cntl->inst);
+
+       return true;
+}
+
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+       struct abm *abm = pipe_ctx->stream_res.abm;
+       struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+       if (abm)
+               abm->funcs->set_abm_immediate_disable(abm,
+                               pipe_ctx->stream->link->panel_cntl->inst);
+
+       if (panel_cntl)
+               panel_cntl->funcs->store_backlight_level(panel_cntl);
+}
+
 static const struct hw_sequencer_funcs dce110_funcs = {
        .program_gamut_remap = program_gamut_remap,
        .program_output_csc = program_output_csc,
@@ -2769,7 +2801,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dce110_set_cursor_position,
-       .set_cursor_attribute = dce110_set_cursor_attribute
+       .set_cursor_attribute = dce110_set_cursor_attribute,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dce110_private_funcs = {
@@ -2785,8 +2819,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
        .disable_stream_gating = NULL,
        .enable_stream_gating = NULL,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
index 34be166e8ff0eaf146b0b95cd414989d68d9e9cd..fe5326df00f7eb54b3d9e8981c73d5dd8213f437 100644 (file)
@@ -85,9 +85,10 @@ void dce110_edp_wait_for_hpd_ready(
                struct dc_link *link,
                bool power_up);
 
-bool dce110_is_panel_backlight_on(struct dc_link *link);
-
-bool dce110_is_panel_powered_on(struct dc_link *link);
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+               uint32_t backlight_pwm_u16_16,
+               uint32_t frame_ramp);
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
 
 #endif /* __DC_HWSS_DCE110_H__ */
 
index 4245e1f818a3d0d84138e3edd33a7c4d4a9d887d..e096d2b95ef9d49233333e7cff52ccab43de4162 100644 (file)
@@ -679,8 +679,7 @@ void dce110_opp_v_set_csc_default(
        if (default_adjust->force_hw_default == false) {
                const struct out_csc_color_matrix *elm;
                /* currently parameter not in use */
-               enum grph_color_adjust_option option =
-                       GRPH_COLOR_MATRIX_HW_DEFAULT;
+               enum grph_color_adjust_option option;
                uint32_t i;
                /*
                 * HW default false we program locally defined matrix
index bf14e9ab040ce4083ee6ed9546b354d2de9b644a..9597fc79d7faf97e6d09cecc87fb41284638192d 100644 (file)
@@ -53,6 +53,7 @@
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
 #include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
 
 #define DC_LOGGER \
                dc->ctx->logger
@@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCE_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -673,6 +686,23 @@ static struct link_encoder *dce110_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct output_pixel_processor *dce110_opp_create(
        struct dc_context *ctx,
        uint32_t inst)
@@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
 static const struct resource_funcs dce110_res_pool_funcs = {
        .destroy = dce110_destroy_resource_pool,
        .link_enc_create = dce110_link_encoder_create,
+       .panel_cntl_create = dce110_panel_cntl_create,
        .validate_bandwidth = dce110_validate_bandwidth,
        .validate_plane = dce110_validate_plane,
        .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
index 700ad8b3e54b2910b16905cd46d0d8b0fe2f7391..51b3fe50267050ed6de1ae36b5bcd15c687ceeb6 100644 (file)
@@ -51,6 +51,7 @@
 #include "dce/dce_dmcu.h"
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
                aux_regs(5)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define hpd_regs(id)\
 [id] = {\
        HPD_REG_LIST(id)\
@@ -398,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
        .pixel_format_support = {
                        .argb8888 = true,
                        .nv12 = false,
-                       .fp16 = false
+                       .fp16 = true
        },
 
        .max_upscale_factor = {
@@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct input_pixel_processor *dce112_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
@@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce112_res_pool_funcs = {
        .destroy = dce112_destroy_resource_pool,
        .link_enc_create = dce112_link_encoder_create,
+       .panel_cntl_create = dce112_panel_cntl_create,
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce112_add_stream_to_ctx,
index 53ab88ef71f5ee25c0f4940ad7f8b1e1333a0251..8f362e8c17870baebaaf31ca8a516ba53692505d 100644 (file)
@@ -44,6 +44,7 @@
 #include "dce/dce_clock_source.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "dce110/dce110_hw_sequencer.h"
 #include "dce120/dce120_hw_sequencer.h"
@@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCE12_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -503,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
        .pixel_format_support = {
                        .argb8888 = true,
                        .nv12 = false,
-                       .fp16 = false
+                       .fp16 = true
        },
 
        .max_upscale_factor = {
@@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 static struct input_pixel_processor *dce120_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
@@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce120_res_pool_funcs = {
        .destroy = dce120_destroy_resource_pool,
        .link_enc_create = dce120_link_encoder_create,
+       .panel_cntl_create = dce120_panel_cntl_create,
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce112_add_stream_to_ctx,
index 893261c81854b9eac7d1f0803329774169e44823..d2ceebdbdf51607bf4a74dd3d674c0cf5eba67a9 100644 (file)
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
 
-struct dce80_hw_seq_reg_offsets {
-       uint32_t crtc;
-};
-
-static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
-{
-       .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
-       .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-}
-};
-
-#define HW_REG_CRTC(reg, id)\
-       (reg + reg_offsets[id].crtc)
-
 /*******************************************************************************
  * Private definitions
  ******************************************************************************/
index 2ad5c28c6e66caf1c08afe847a3a722f8d2cc1d1..a19be9de2df7d87a76e14db7792abdae35a82c18 100644 (file)
@@ -50,6 +50,7 @@
 #include "dce/dce_hwseq.h"
 #include "dce80/dce80_hw_sequencer.h"
 #include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "reg_helper.h"
 
@@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = {
                SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define opp_regs(id)\
 [id] = {\
        OPP_DCE_80_REG_LIST(id),\
@@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create(
        return &enc110->base;
 }
 
+static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dce80_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce80_res_pool_funcs = {
        .destroy = dce80_destroy_resource_pool,
        .link_enc_create = dce80_link_encoder_create,
+       .panel_cntl_create = dce80_panel_cntl_create,
        .validate_bandwidth = dce80_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
index 0e682b5aa3ebe0b6ffc700be5cfc0bec0d12a4df..7f8456b9988b243711e75136ff7fd6a0fd5f4ba0 100644 (file)
@@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
                struct scaler_data *scl_data,
                const struct scaling_taps *in_taps)
 {
-       uint32_t pixel_width;
-
-       if (scl_data->viewport.width > scl_data->recout.width)
-               pixel_width = scl_data->recout.width;
-       else
-               pixel_width = scl_data->viewport.width;
-
        /* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
        if (scl_data->format == PIXEL_FORMAT_FP16 &&
                dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
index deccab0228d2b648ee5512de90f697df799f2c50..75637c291e75468d8525f48b07af09155396ddd2 100644 (file)
@@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
 void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
 {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
        /*
         * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
         * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
index 31b64733d693576b23e751616b5be1c8c34fed7e..319366ebb44fe4f8e1521c842e05e1cc5340aae1 100644 (file)
@@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position(
        int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
        int x_hotspot = pos->x_hotspot;
        int y_hotspot = pos->y_hotspot;
+       int cursor_height = (int)hubp->curs_attr.height;
+       int cursor_width = (int)hubp->curs_attr.width;
        uint32_t dst_x_offset;
        uint32_t cur_en = pos->enable ? 1 : 0;
 
@@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position(
        if (hubp->curs_attr.address.quad_part == 0)
                return;
 
+       // Rotated cursor width/height and hotspots tweaks for offset calculation
        if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
-               src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
-               y_hotspot = pos->x_hotspot;
-               x_hotspot = pos->y_hotspot;
+               swap(cursor_height, cursor_width);
+               if (param->rotation == ROTATION_ANGLE_90) {
+                       src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+                       src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+               }
+       } else if (param->rotation == ROTATION_ANGLE_180) {
+               src_x_offset = pos->x - param->viewport.x;
+               src_y_offset = pos->y - param->viewport.y;
        }
 
        if (param->mirror) {
@@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position(
        if (src_x_offset >= (int)param->viewport.width)
                cur_en = 0;  /* not visible beyond right edge*/
 
-       if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+       if (src_x_offset + cursor_width <= 0)
                cur_en = 0;  /* not visible beyond left edge*/
 
        if (src_y_offset >= (int)param->viewport.height)
                cur_en = 0;  /* not visible beyond bottom edge*/
 
-       if (src_y_offset + (int)hubp->curs_attr.height <= 0)
+       if (src_y_offset + cursor_height <= 0)
                cur_en = 0;  /* not visible beyond top edge*/
 
        if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
index 416afb99529d1864031c7d9c5d809aaf2e8c776d..77f16921e7f0acbf965ccd81ac5c08b1761efcd5 100644 (file)
@@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc)
        if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
                if (allow_self_fresh_force_enable == false &&
                                dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
-                       dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
+                       dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+                                                                               !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
 
 }
 
@@ -826,6 +827,14 @@ enum dc_status dcn10_enable_stream_timing(
        color_space = stream->output_color_space;
        color_space_to_black_color(dc, color_space, &black_color);
 
+       /*
+        * The way 420 is packed, 2 channels carry Y component, 1 channel
+        * alternate between Cb and Cr, so both channels need the pixel
+        * value for Y
+        */
+       if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               black_color.color_r_cr = black_color.color_g_y;
+
        if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
                pipe_ctx->stream_res.tg->funcs->set_blank_color(
                                pipe_ctx->stream_res.tg,
@@ -903,7 +912,7 @@ static void dcn10_reset_back_end_for_pipe(
        if (pipe_ctx->top_pipe == NULL) {
 
                if (pipe_ctx->stream_res.abm)
-                       pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+                       dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
@@ -1238,12 +1247,13 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 
 void dcn10_init_hw(struct dc *dc)
 {
-       int i;
+       int i, j;
        struct abm *abm = dc->res_pool->abm;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct dce_hwseq *hws = dc->hwseq;
        struct dc_bios *dcb = dc->ctx->dc_bios;
        struct resource_pool *res_pool = dc->res_pool;
+       uint32_t backlight = MAX_BACKLIGHT_LEVEL;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
                dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -1333,17 +1343,28 @@ void dcn10_init_hw(struct dc *dc)
                                continue;
 
                        /*
-                        * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
-                        * which needs to read dpcd info with the help of aconnector.
-                        * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
-                        * cannot be read.
+                        * If any of the displays are lit up turn them off.
+                        * The reason is that some MST hubs cannot be turned off
+                        * completely until we tell them to do so.
+                        * If not turned off, then displays connected to MST hub
+                        * won't light up.
                         */
-                       if (dc->links[i]->priv) {
-                               /* if any of the displays are lit up turn them off */
-                               status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
-                                                               &dpcd_power_state, sizeof(dpcd_power_state));
-                               if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
-                                       dp_receiver_power_ctrl(dc->links[i], false);
+                       status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+                                                       &dpcd_power_state, sizeof(dpcd_power_state));
+                       if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+                               /* blank dp stream before power off receiver*/
+                               if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+                                       unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+                                       for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+                                               if (fe == dc->res_pool->stream_enc[j]->id) {
+                                                       dc->res_pool->stream_enc[j]->funcs->dp_blank(
+                                                                               dc->res_pool->stream_enc[j]);
+                                                       break;
+                                               }
+                                       }
+                               }
+                               dp_receiver_power_ctrl(dc->links[i], false);
                        }
                }
        }
@@ -1361,17 +1382,54 @@ void dcn10_init_hw(struct dc *dc)
                                        !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
        }
 
+       /* In headless boot cases, DIG may be turned
+        * on which causes HW/SW discrepancies.
+        * To avoid this, power down hardware on boot
+        * if DIG is turned on and seamless boot not enabled
+        */
+       if (dc->config.power_down_display_on_boot) {
+               struct dc_link *edp_link = get_edp_link(dc);
+
+               if (edp_link &&
+                               edp_link->link_enc->funcs->is_dig_enabled &&
+                               edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+                               dc->hwss.edp_backlight_control &&
+                               dc->hwss.power_down &&
+                               dc->hwss.edp_power_control) {
+                       dc->hwss.edp_backlight_control(edp_link, false);
+                       dc->hwss.power_down(dc);
+                       dc->hwss.edp_power_control(edp_link, false);
+               } else {
+                       for (i = 0; i < dc->link_count; i++) {
+                               struct dc_link *link = dc->links[i];
+
+                               if (link->link_enc->funcs->is_dig_enabled &&
+                                               link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+                                               dc->hwss.power_down) {
+                                       dc->hwss.power_down(dc);
+                                       break;
+                               }
+
+                       }
+               }
+       }
+
        for (i = 0; i < res_pool->audio_count; i++) {
                struct audio *audio = res_pool->audios[i];
 
                audio->funcs->hw_init(audio);
        }
 
-       if (abm != NULL) {
-               abm->funcs->init_backlight(abm);
-               abm->funcs->abm_init(abm);
+       for (i = 0; i < dc->link_count; i++) {
+               struct dc_link *link = dc->links[i];
+
+               if (link->panel_cntl)
+                       backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
        }
 
+       if (abm != NULL)
+               abm->funcs->abm_init(abm, backlight);
+
        if (dmcu != NULL && !dmcu->auto_load_dmcu)
                dmcu->funcs->dmcu_init(dmcu);
 
@@ -2164,25 +2222,25 @@ void dcn10_get_surface_visual_confirm_color(
 
        switch (pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB8888:
-               /* set boarder color to red */
+               /* set border color to red */
                color->color_r_cr = color_value;
                break;
 
        case PIXEL_FORMAT_ARGB2101010:
-               /* set boarder color to blue */
+               /* set border color to blue */
                color->color_b_cb = color_value;
                break;
        case PIXEL_FORMAT_420BPP8:
-               /* set boarder color to green */
+               /* set border color to green */
                color->color_g_y = color_value;
                break;
        case PIXEL_FORMAT_420BPP10:
-               /* set boarder color to yellow */
+               /* set border color to yellow */
                color->color_g_y = color_value;
                color->color_r_cr = color_value;
                break;
        case PIXEL_FORMAT_FP16:
-               /* set boarder color to white */
+               /* set border color to white */
                color->color_r_cr = color_value;
                color->color_b_cb = color_value;
                color->color_g_y = color_value;
@@ -2207,25 +2265,25 @@ void dcn10_get_hdr_visual_confirm_color(
        switch (top_pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB2101010:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
-                       /* HDR10, ARGB2101010 - set boarder color to red */
+                       /* HDR10, ARGB2101010 - set border color to red */
                        color->color_r_cr = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
-                       /* FreeSync 2 ARGB2101010 - set boarder color to pink */
+                       /* FreeSync 2 ARGB2101010 - set border color to pink */
                        color->color_r_cr = color_value;
                        color->color_b_cb = color_value;
                }
                break;
        case PIXEL_FORMAT_FP16:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
-                       /* HDR10, FP16 - set boarder color to blue */
+                       /* HDR10, FP16 - set border color to blue */
                        color->color_b_cb = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
-                       /* FreeSync 2 HDR - set boarder color to green */
+                       /* FreeSync 2 HDR - set border color to green */
                        color->color_g_y = color_value;
                }
                break;
        default:
-               /* SDR - set boarder color to Gray */
+               /* SDR - set border color to Gray */
                color->color_r_cr = color_value/2;
                color->color_b_cb = color_value/2;
                color->color_g_y = color_value/2;
@@ -2274,6 +2332,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
                                &blnd_cfg.black_color);
        }
 
+       /*
+        * The way 420 is packed, 2 channels carry Y component, 1 channel
+        * alternate between Cb and Cr, so both channels need the pixel
+        * value for Y
+        */
+       if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
+
        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
        else
@@ -2510,12 +2576,12 @@ void dcn10_blank_pixel_data(
                if (stream_res->tg->funcs->set_blank)
                        stream_res->tg->funcs->set_blank(stream_res->tg, blank);
                if (stream_res->abm) {
-                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+                                       stream->link->panel_cntl->inst);
                        stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
                }
        } else if (blank) {
-               if (stream_res->abm)
-                       stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
                if (stream_res->tg->funcs->set_blank)
                        stream_res->tg->funcs->set_blank(stream_res->tg, blank);
        }
index 9e8e32629e4782a8f1e636197d727b14e55003da..7cb8c3fb266563841d819bebec1571782deeb966 100644 (file)
@@ -73,6 +73,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .get_clock = dcn10_get_clock,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -89,8 +91,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
        .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn10_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = NULL,
        .enable_stream_gating = NULL,
        .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
index d3617d6785a7edafa8d28314d67854295e0ca0b9..7fd385be3f3def5756ef9fd88e6e5c66f720cbb4 100644 (file)
@@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
        .is_dig_enabled = dcn10_is_dig_enabled,
        .get_dig_frontend = dcn10_get_dig_frontend,
        .get_dig_mode = dcn10_get_dig_mode,
-       .destroy = dcn10_link_encoder_destroy
+       .destroy = dcn10_link_encoder_destroy,
+       .get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
 };
 
 static enum bp_result link_transmitter_control(
@@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
                        DC_HPD_EN, 0);
 }
 
-
 #define AUX_REG(reg)\
        (enc10->aux_regs->reg)
 
@@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode(
        return SIGNAL_TYPE_NONE;
 }
 
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+       /* Higher link settings based on feature supported */
+       if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+       if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+               max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+       *link_settings = max_link_cap;
+}
index 762109174fb879650a32df771596410d43d2f564..68395bcc24fddf93b1715ce783c6b7b61de72610 100644 (file)
@@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
 
 enum signal_type dcn10_get_dig_mode(
        struct link_encoder *enc);
+
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
 #endif /* __DC_LINK_ENCODER__DCN10_H__ */
index 17d96ec6acd8fc8d3ba3e17875d7d6c57c0f7190..ec0ab42becbac1136f4e9358e687abc632bc8196 100644 (file)
@@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc,
        uint32_t asic_blank_end;
        uint32_t v_init;
        uint32_t v_fp2 = 0;
+       int32_t vertical_line_start;
 
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc,
                        patched_crtc_timing.v_border_top;
 
        /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
-       if (optc1->vstartup_start > asic_blank_end)
-               v_fp2 = optc1->vstartup_start - asic_blank_end;
+       vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+       if (vertical_line_start < 0)
+               v_fp2 = -vertical_line_start;
 
        /* Interlace */
        if (REG(OTG_INTERLACE_CONTROL)) {
index 9a459a8fe8a055802b3ff7ab550a619938c5eefa..8d1e52fb0393918ff2fd39dc0993484d43c38a02 100644 (file)
@@ -158,6 +158,7 @@ struct dcn_optc_registers {
        uint32_t OTG_GSL_WINDOW_Y;
        uint32_t OTG_VUPDATE_KEEPOUT;
        uint32_t OTG_CRC_CNTL;
+       uint32_t OTG_CRC_CNTL2;
        uint32_t OTG_CRC0_DATA_RG;
        uint32_t OTG_CRC0_DATA_B;
        uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
@@ -475,7 +476,11 @@ struct dcn_optc_registers {
        type OPTC_DSC_SLICE_WIDTH;\
        type OPTC_SEGMENT_WIDTH;\
        type OPTC_DWB0_SOURCE_SELECT;\
-       type OPTC_DWB1_SOURCE_SELECT;
+       type OPTC_DWB1_SOURCE_SELECT;\
+       type OTG_CRC_DSC_MODE;\
+       type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+       type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+       type OTG_CRC_DATA_FORMAT;
 
 
 
index ba849aa31e6e77346fc6c25f028cf5251929e6a8..17d5cb422025e13dfb237d2ef6979364c2b10cde 100644 (file)
@@ -51,6 +51,7 @@
 #include "dce112/dce112_resource.h"
 #include "dcn10_hubp.h"
 #include "dcn10_hubbub.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "soc15_hw_ip.h"
 #include "vega10_ip_offset.h"
@@ -329,6 +330,18 @@ static const struct dcn10_link_enc_mask le_mask = {
                LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 static const struct dce110_aux_registers_shift aux_shift = {
        DCN10_AUX_MASK_SH_LIST(__SHIFT)
 };
@@ -817,6 +830,23 @@ struct link_encoder *dcn10_link_encoder_create(
        return &enc10->base;
 }
 
+static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dcn10_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -1091,24 +1121,6 @@ static enum dc_status build_mapped_resource(
 {
        struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 
-       /*TODO Seems unneeded anymore */
-       /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                                todo: shouldn't have to copy missing parameter here
-                               resource_build_bit_depth_reduction_params(stream,
-                                               &stream->bit_depth_params);
-                               stream->clamping.pixel_encoding =
-                                               stream->timing.pixel_encoding;
-
-                               resource_build_bit_depth_reduction_params(stream,
-                                                               &stream->bit_depth_params);
-                               build_clamping_params(stream);
-
-                               continue;
-                       }
-               }
-       */
-
        if (!pipe_ctx)
                return DC_ERROR_UNEXPECTED;
 
@@ -1301,6 +1313,7 @@ static const struct dc_cap_funcs cap_funcs = {
 static const struct resource_funcs dcn10_res_pool_funcs = {
        .destroy = dcn10_destroy_resource_pool,
        .link_enc_create = dcn10_link_encoder_create,
+       .panel_cntl_create = dcn10_panel_cntl_create,
        .validate_bandwidth = dcn_validate_bandwidth,
        .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
        .validate_plane = dcn10_validate_plane,
@@ -1363,6 +1376,40 @@ static bool dcn10_resource_construct(
        /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
        dc->caps.force_dp_tps4_for_cp2520 = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 1;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 0;
+       dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 1;
+
+       /* no post-blend color operations */
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 0;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 0;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else
index 7eba9333c3285dc9420b32cf4f530888c93b2794..07b2f9399671dbca108cbb021d8c7cc988290739 100644 (file)
@@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
 {
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       uint32_t speakers = 0;
        uint32_t channels = 0;
 
        ASSERT(audio_info);
@@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
                /* This should not happen.it does so we don't get BSOD*/
                return;
 
-       speakers = audio_info->flags.info.ALLSPEAKERS;
        channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
 
        /* setup the audio stream source select (audio -> dig mapping) */
index 501532dd523ade2ee033e3e01c23a15b36e74502..c478213ba7adc600306420e63077141c00496b9d 100644 (file)
@@ -80,6 +80,7 @@ struct dcn20_hubbub {
        const struct dcn_hubbub_mask *masks;
        unsigned int debug_test_index_pstate;
        struct dcn_watermark_set watermarks;
+       int num_vmid;
        struct dcn20_vmid vmid[16];
        unsigned int detile_buf_size;
 };
index a023a4d59f412ed76cb1fc8308c358c8925f8711..da5333d165ace594960bff5a8a667ea943b4256d 100644 (file)
@@ -961,8 +961,7 @@ void dcn20_blank_pixel_data(
        width = width / odm_cnt;
 
        if (blank) {
-               if (stream_res->abm)
-                       stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
                        test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@@ -997,7 +996,8 @@ void dcn20_blank_pixel_data(
 
        if (!blank)
                if (stream_res->abm) {
-                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+                                       stream->link->panel_cntl->inst);
                        stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
                }
 }
@@ -1478,8 +1478,11 @@ static void dcn20_program_pipe(
        if (pipe_ctx->update_flags.bits.odm)
                hws->funcs.update_odm(dc, context, pipe_ctx);
 
-       if (pipe_ctx->update_flags.bits.enable)
+       if (pipe_ctx->update_flags.bits.enable) {
                dcn20_enable_plane(dc, pipe_ctx, context);
+               if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+                       dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+       }
 
        if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
                dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2037,8 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
         */
        if (pipe_ctx->top_pipe == NULL) {
 
-               if (pipe_ctx->stream_res.abm)
-                       pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+               dc->hwss.set_abm_immediate_disable(pipe_ctx);
 
                pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
@@ -2171,6 +2173,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         */
        mpcc_id = hubp->inst;
 
+       /* If there is no full update, don't need to touch MPC tree*/
+       if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+               !pipe_ctx->update_flags.bits.mpcc) {
+               mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+               return;
+       }
+
        /* check if this MPCC is already being used */
        new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
        /* remove MPCC if being used */
index 8334bbd6eabbe1e7e1f485e7ea853d368d9437c8..2fbde4241559f11c6ffa8e9cea56a02e60d98077 100644 (file)
@@ -84,6 +84,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -99,8 +101,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
        .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn20_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = dcn20_disable_stream_gating,
        .enable_stream_gating = dcn20_enable_stream_gating,
        .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
index e4ac73035c84a4522e2781ee44ac176157d2b46b..8d209dae66e6ae8a8130edfd6da07424346d6a19 100644 (file)
 #define IND_REG(index) \
        (enc10->link_regs->index)
 
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
 
 static struct mpll_cfg dcn2_mpll_cfg[] = {
        // RBR
@@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
 
 }
 
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t is_in_usb_c_dp4_mode = 0;
+
+       dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+       /* in usb c dp2 mode, max lane count is 2 */
+       if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+               if (!is_in_usb_c_dp4_mode)
+                       link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+       }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t dp_alt_mode_disable = 0;
+       bool is_usb_c_alt_mode = false;
+
+       if (enc->features.flags.bits.DP_IS_USB_C) {
+               /* if value == 1 alt mode is disabled, otherwise it is enabled */
+               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+               is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+       }
+
+       return is_usb_c_alt_mode;
+}
+
 #define AUX_REG(reg)\
        (enc10->aux_regs->reg)
 
@@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
        .fec_is_active = enc2_fec_is_active,
        .get_dig_mode = dcn10_get_dig_mode,
        .get_dig_frontend = dcn10_get_dig_frontend,
+       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
 };
 
 void dcn20_link_encoder_construct(
index 8cab8107fd94c2094610685a1e2bd6db0f15c027..284a1ee4d249ef1bfe15326bf01766f1d9a2d121 100644 (file)
@@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output(
        const struct dc_link_settings *link_settings,
        enum clock_source_id clock_source);
 
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 void dcn20_link_encoder_construct(
        struct dcn20_link_encoder *enc20,
        const struct encoder_init_data *init_data,
index 570dfd9a243f732e878fc4b7e3365d9fa3fc28e5..99cc095dc33c7e49777710e137c535d5cf088d9b 100644 (file)
@@ -452,7 +452,7 @@ void mpc2_set_output_gamma(
                next_mode = LUT_RAM_A;
 
        mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
-       mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+       mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
 
        if (next_mode == LUT_RAM_A)
                mpc2_program_luta(mpc, mpcc_id, params);
index d875b0c38fded4928c81e2173ba3b425f5e54963..8c16967fe01807c96eacb37576690a1070a1a2c6 100644 (file)
@@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
                        OTG_TRIGA_MANUAL_TRIG, 1);
 }
 
+bool optc2_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_SET_2(OTG_CRC_CNTL2, 0,
+                       OTG_CRC_DSC_MODE, params->dsc_mode,
+                       OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+       return optc1_configure_crc(optc, params);
+}
+
 static struct timing_generator_funcs dcn20_tg_funcs = {
                .validate_timing = optc1_validate_timing,
                .program_timing = optc1_program_timing,
@@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
                .clear_optc_underflow = optc1_clear_optc_underflow,
                .setup_global_swap_lock = NULL,
                .get_crc = optc1_get_crc,
-               .configure_crc = optc1_configure_crc,
+               .configure_crc = optc2_configure_crc,
                .set_dsc_config = optc2_set_dsc_config,
                .set_dwb_source = optc2_set_dwb_source,
                .set_odm_bypass = optc2_set_odm_bypass,
index 239cc40ae474be3765e7bbd54c9766a52b2f9712..e0a0a8a8e2c606214f72fc32cc6e8c2ec6bfc625 100644 (file)
@@ -36,6 +36,7 @@
        SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
        SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
        SRI(OTG_DSC_START_POSITION, OTG, inst),\
+       SRI(OTG_CRC_CNTL2, OTG, inst),\
        SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
        SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
        SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
        SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
        SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
 void optc2_setup_manual_trigger(struct timing_generator *optc);
 void optc2_program_manual_trigger(struct timing_generator *optc);
 bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params);
 #endif /* __DC_OPTC_DCN20_H__ */
index e4348e3b638985a9d68ef14b7b15a1f9c36696c7..cef1aa938ab542c397786cb9a8c4671df925a03e 100644 (file)
@@ -61,6 +61,7 @@
 #include "dcn20_dccg.h"
 #include "dcn20_vmid.h"
 #include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "navi10_ip_offset.h"
 
@@ -691,6 +692,18 @@ static const struct dcn10_link_enc_mask le_mask = {
        DPCS_DCN2_MASK_SH_LIST(_MASK)
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define ipp_regs(id)\
 [id] = {\
        IPP_REG_LIST_DCN20(id),\
@@ -1293,6 +1306,23 @@ struct link_encoder *dcn20_link_encoder_create(
        return &enc20->enc10.base;
 }
 
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 struct clock_source *dcn20_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -1623,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
        enum dc_status status = DC_OK;
        struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 
-       /*TODO Seems unneeded anymore */
-       /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
-                       if (stream != NULL && old_context->streams[i] != NULL) {
-                                todo: shouldn't have to copy missing parameter here
-                               resource_build_bit_depth_reduction_params(stream,
-                                               &stream->bit_depth_params);
-                               stream->clamping.pixel_encoding =
-                                               stream->timing.pixel_encoding;
-
-                               resource_build_bit_depth_reduction_params(stream,
-                                                               &stream->bit_depth_params);
-                               build_clamping_params(stream);
-
-                               continue;
-                       }
-               }
-       */
-
        if (!pipe_ctx)
                return DC_ERROR_UNEXPECTED;
 
@@ -1651,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
 }
 
 
-static void acquire_dsc(struct resource_context *res_ctx,
-                       const struct resource_pool *pool,
+void dcn20_acquire_dsc(const struct dc *dc,
+                       struct resource_context *res_ctx,
                        struct display_stream_compressor **dsc,
                        int pipe_idx)
 {
        int i;
+       const struct resource_pool *pool = dc->res_pool;
+       struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
 
-       ASSERT(*dsc == NULL);
+       ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
        *dsc = NULL;
 
+       /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
        if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
                *dsc = pool->dscs[pipe_idx];
                res_ctx->is_dsc_acquired[pipe_idx] = true;
                return;
        }
 
+       /* Return old DSC to avoid the need for re-programming */
+       if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+               *dsc = dsc_old;
+               res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+               return ;
+       }
+
        /* Find first free DSC */
        for (i = 0; i < pool->res_cap->num_dsc; i++)
                if (!res_ctx->is_dsc_acquired[i]) {
@@ -1698,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
 {
        enum dc_status result = DC_OK;
        int i;
-       const struct resource_pool *pool = dc->res_pool;
 
        /* Get a DSC if required and available */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1710,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
                if (pipe_ctx->stream_res.dsc)
                        continue;
 
-               acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
+               dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
 
                /* The number of DSCs can be less than the number of pipes */
                if (!pipe_ctx->stream_res.dsc) {
@@ -1838,12 +1859,13 @@ static void swizzle_to_dml_params(
 }
 
 bool dcn20_split_stream_for_odm(
+               const struct dc *dc,
                struct resource_context *res_ctx,
-               const struct resource_pool *pool,
                struct pipe_ctx *prev_odm_pipe,
                struct pipe_ctx *next_odm_pipe)
 {
        int pipe_idx = next_odm_pipe->pipe_idx;
+       const struct resource_pool *pool = dc->res_pool;
 
        *next_odm_pipe = *prev_odm_pipe;
 
@@ -1901,7 +1923,7 @@ bool dcn20_split_stream_for_odm(
        }
        next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
        if (next_odm_pipe->stream->timing.flags.DSC == 1) {
-               acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+               dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
                ASSERT(next_odm_pipe->stream_res.dsc);
                if (next_odm_pipe->stream_res.dsc == NULL)
                        return false;
@@ -1939,8 +1961,6 @@ void dcn20_split_stream_for_mpc(
        secondary_pipe->top_pipe = primary_pipe;
 
        ASSERT(primary_pipe->plane_state);
-       resource_build_scaling_params(primary_pipe);
-       resource_build_scaling_params(secondary_pipe);
 }
 
 void dcn20_populate_dml_writeback_from_context(
@@ -2216,12 +2236,12 @@ int dcn20_populate_dml_pipes_from_context(
                                        || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
                        pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
                                        || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
-                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
-                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
-                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
-                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
-                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
-                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
                        pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
                        pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
                        pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2570,13 +2590,15 @@ int dcn20_validate_apply_pipe_split_flags(
                struct dc *dc,
                struct dc_state *context,
                int vlevel,
-               bool *split,
+               int *split,
                bool *merge)
 {
        int i, pipe_idx, vlevel_split;
        int plane_count = 0;
        bool force_split = false;
        bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+       struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+       int max_mpc_comb = v->maxMpcComb;
 
        if (context->stream_count > 1) {
                if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
@@ -2584,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags(
        } else if (dc->debug.force_single_disp_pipe_split)
                        force_split = true;
 
-       /* TODO: fix dc bugs and remove this split threshold thing */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
+               /**
+                * Workaround for avoiding pipe-split in cases where we'd split
+                * planes that are too small, resulting in splits that aren't
+                * valid for the scaler.
+                */
+               if (pipe->plane_state &&
+                   (pipe->plane_state->dst_rect.width <= 16 ||
+                    pipe->plane_state->dst_rect.height <= 16 ||
+                    pipe->plane_state->src_rect.width <= 16 ||
+                    pipe->plane_state->src_rect.height <= 16))
+                       avoid_split = true;
+
+               /* TODO: fix dc bugs and remove this split threshold thing */
                if (pipe->stream && !pipe->prev_odm_pipe &&
                                (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
                        ++plane_count;
@@ -2602,26 +2636,35 @@ int dcn20_validate_apply_pipe_split_flags(
                                continue;
 
                        for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
-                               if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+                               if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+                                               v->ModeSupport[vlevel][0])
                                        break;
                        /* Impossible to not split this pipe */
                        if (vlevel > context->bw_ctx.dml.soc.num_states)
                                vlevel = vlevel_split;
+                       else
+                               max_mpc_comb = 0;
                        pipe_idx++;
                }
-               context->bw_ctx.dml.vba.maxMpcComb = 0;
+               v->maxMpcComb = max_mpc_comb;
        }
 
        /* Split loop sets which pipe should be split based on dml outputs and dc flags */
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-               int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
+               int pipe_plane = v->pipe_plane[pipe_idx];
+               bool split4mpc = context->stream_count == 1 && plane_count == 1
+                               && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
 
                if (!context->res_ctx.pipe_ctx[i].stream)
                        continue;
 
-               if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
-                       split[i] = true;
+               if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) {
+                       if (split4mpc)
+                               split[i] = 4;
+                       else
+                               split[i] = 2;
+               }
                if ((pipe->stream->view_format ==
                                VIEW_3D_FORMAT_SIDE_BY_SIDE ||
                                pipe->stream->view_format ==
@@ -2630,50 +2673,75 @@ int dcn20_validate_apply_pipe_split_flags(
                                TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
                                 pipe->stream->timing.timing_3d_format ==
                                TIMING_3D_FORMAT_SIDE_BY_SIDE))
-                       split[i] = true;
+                       split[i] = 2;
                if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
-                       split[i] = true;
-                       context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+                       split[i] = 2;
+                       v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
                }
-               context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
-                       context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
-
-               if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
-                       /*Already split odm pipe tree, don't try to split again*/
-                       split[i] = false;
-                       split[pipe->prev_odm_pipe->pipe_idx] = false;
-               } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
-                               && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
-                       /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
-                       split[i] = false;
-                       split[pipe->top_pipe->pipe_idx] = false;
-               } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
-                       if (split[i] == false) {
-                               /*Exiting mpc/odm combine*/
-                               merge[i] = true;
+               v->ODMCombineEnabled[pipe_plane] =
+                       v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+               if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+                       if (get_num_mpc_splits(pipe) == 1) {
+                               /*If need split for mpc but 2 way split already*/
+                               if (split[i] == 4)
+                                       split[i] = 2; /* 2 -> 4 MPC */
+                               else if (split[i] == 2)
+                                       split[i] = 0; /* 2 -> 2 MPC */
+                               else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+                                       merge[i] = true; /* 2 -> 1 MPC */
+                       } else if (get_num_mpc_splits(pipe) == 3) {
+                               /*If need split for mpc but 4 way split already*/
+                               if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+                                               || !pipe->bottom_pipe)) {
+                                       merge[i] = true; /* 4 -> 2 MPC */
+                               } else if (split[i] == 0 && pipe->top_pipe &&
+                                               pipe->top_pipe->plane_state == pipe->plane_state)
+                                       merge[i] = true; /* 4 -> 1 MPC */
+                               split[i] = 0;
+                       } else if (get_num_odm_splits(pipe)) {
+                               /* ODM -> MPC transition */
+                               ASSERT(0); /* NOT expected yet */
                                if (pipe->prev_odm_pipe) {
-                                       ASSERT(0); /*should not actually happen yet*/
-                                       merge[pipe->prev_odm_pipe->pipe_idx] = true;
-                               } else
-                                       merge[pipe->top_pipe->pipe_idx] = true;
-                       } else {
-                               /*Transition from mpc combine to odm combine or vice versa*/
-                               ASSERT(0); /*should not actually happen yet*/
-                               split[i] = true;
-                               merge[i] = true;
-                               if (pipe->prev_odm_pipe) {
-                                       split[pipe->prev_odm_pipe->pipe_idx] = true;
-                                       merge[pipe->prev_odm_pipe->pipe_idx] = true;
-                               } else {
-                                       split[pipe->top_pipe->pipe_idx] = true;
-                                       merge[pipe->top_pipe->pipe_idx] = true;
+                                       split[i] = 0;
+                                       merge[i] = true;
+                               }
+                       }
+               } else {
+                       if (get_num_odm_splits(pipe) == 1) {
+                               /*If need split for odm but 2 way split already*/
+                               if (split[i] == 4)
+                                       split[i] = 2; /* 2 -> 4 ODM */
+                               else if (split[i] == 2)
+                                       split[i] = 0; /* 2 -> 2 ODM */
+                               else if (pipe->prev_odm_pipe) {
+                                       ASSERT(0); /* NOT expected yet */
+                                       merge[i] = true; /* exit ODM */
+                               }
+                       } else if (get_num_odm_splits(pipe) == 3) {
+                               /*If need split for odm but 4 way split already*/
+                               if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+                                               || !pipe->next_odm_pipe)) {
+                                       ASSERT(0); /* NOT expected yet */
+                                       merge[i] = true; /* 4 -> 2 ODM */
+                               } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+                                       ASSERT(0); /* NOT expected yet */
+                                       merge[i] = true; /* exit ODM */
+                               }
+                               split[i] = 0;
+                       } else if (get_num_mpc_splits(pipe)) {
+                               /* MPC -> ODM transition */
+                               ASSERT(0); /* NOT expected yet */
+                               if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+                                       split[i] = 0;
+                                       merge[i] = true;
                                }
                        }
                }
 
                /* Adjust dppclk when split is forced, do not bother with dispclk */
-               if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
-                       context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+               if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+                       v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
                pipe_idx++;
        }
 
@@ -2689,7 +2757,7 @@ bool dcn20_fast_validate_bw(
                int *vlevel_out)
 {
        bool out = false;
-       bool split[MAX_PIPES] = { false };
+       int split[MAX_PIPES] = { 0 };
        int pipe_cnt, i, pipe_idx, vlevel;
 
        ASSERT(pipes);
@@ -2731,7 +2799,7 @@ bool dcn20_fast_validate_bw(
                        hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
                        ASSERT(hsplit_pipe);
                        if (!dcn20_split_stream_for_odm(
-                                       &context->res_ctx, dc->res_pool,
+                                       dc, &context->res_ctx,
                                        pipe, hsplit_pipe))
                                goto validate_fail;
                        pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
@@ -2749,7 +2817,7 @@ bool dcn20_fast_validate_bw(
                                && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
                        goto validate_fail;
 
-               if (split[i]) {
+               if (split[i] == 2) {
                        if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
                                /* pipe not split previously needs split */
                                hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@@ -2760,14 +2828,17 @@ bool dcn20_fast_validate_bw(
                                }
                                if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
                                        if (!dcn20_split_stream_for_odm(
-                                                       &context->res_ctx, dc->res_pool,
+                                                       dc, &context->res_ctx,
                                                        pipe, hsplit_pipe))
                                                goto validate_fail;
                                        dcn20_build_mapped_resource(dc, context, pipe->stream);
-                               } else
+                               } else {
                                        dcn20_split_stream_for_mpc(
-                                               &context->res_ctx, dc->res_pool,
-                                               pipe, hsplit_pipe);
+                                                       &context->res_ctx, dc->res_pool,
+                                                       pipe, hsplit_pipe);
+                                       if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
+                                               goto validate_fail;
+                               }
                                pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
                        }
                } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -3007,7 +3078,7 @@ void dcn20_calculate_dlg_params(
                                pipe_idx,
                                cstate_en,
                                context->bw_ctx.bw.dcn.clk.p_state_change_support,
-                               false, false, false);
+                               false, false, true);
 
                context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
                                &context->res_ctx.pipe_ctx[i].rq_regs,
@@ -3091,6 +3162,8 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
        context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
                dc->debug.disable_dram_clock_change_vactive_support;
+       context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+               dc->debug.enable_dram_clock_change_one_display_vactive;
 
        if (fast_validate) {
                return dcn20_validate_bandwidth_internal(dc, context, true);
@@ -3189,8 +3262,6 @@ static struct dc_cap_funcs cap_funcs = {
 
 enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
-       enum dc_status result = DC_OK;
-
        enum surface_pixel_format surf_pix_format = plane_state->format;
        unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
 
@@ -3202,12 +3273,13 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
                swizzle = DC_SW_64KB_S;
 
        plane_state->tiling_info.gfx9.swizzle = swizzle;
-       return result;
+       return DC_OK;
 }
 
 static struct resource_funcs dcn20_res_pool_funcs = {
        .destroy = dcn20_destroy_resource_pool,
        .link_enc_create = dcn20_link_encoder_create,
+       .panel_cntl_create = dcn20_panel_cntl_create,
        .validate_bandwidth = dcn20_validate_bandwidth,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -3446,6 +3518,13 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
                bb->dram_clock_change_latency_us =
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
        }
+
+       if ((int)(bb->dummy_pstate_latency_us * 1000)
+                               != dc->bb_overrides.dummy_clock_change_latency_ns
+                       && dc->bb_overrides.dummy_clock_change_latency_ns) {
+               bb->dummy_pstate_latency_us =
+                               dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+       }
 }
 
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3681,9 +3760,42 @@ static bool dcn20_resource_construct(
        dc->caps.max_slave_planes = 1;
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
-       dc->caps.hw_3d_lut = true;
        dc->caps.extended_aux_timeout_support = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 0;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 1;
+       dc->caps.color.dpp.ogam_ram = 1;
+       // no OGAM ROM on DCN2, only MPC ROM
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 0;
+
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 1;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 1;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
                dc->debug = debug_defaults_drv;
        } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 9d5bff9455fd0f7f6425de0fbc9ea9b50b1e7ed1..2c1959845c29a424d41dc47d662e6188dbc89ed1 100644 (file)
@@ -123,7 +123,7 @@ int dcn20_validate_apply_pipe_split_flags(
                struct dc *dc,
                struct dc_state *context,
                int vlevel,
-               bool *split,
+               int *split,
                bool *merge);
 void dcn20_release_dsc(struct resource_context *res_ctx,
                        const struct resource_pool *pool,
@@ -135,10 +135,14 @@ void dcn20_split_stream_for_mpc(
                struct pipe_ctx *primary_pipe,
                struct pipe_ctx *secondary_pipe);
 bool dcn20_split_stream_for_odm(
+               const struct dc *dc,
                struct resource_context *res_ctx,
-               const struct resource_pool *pool,
                struct pipe_ctx *prev_odm_pipe,
                struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+                       struct resource_context *res_ctx,
+                       struct display_stream_compressor **dsc,
+                       int pipe_idx);
 struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
index 5e2d14b897af7916d922b3dc962b531661df2959..129f0b62f7512bd4fb51377a198e5d0fe35e4340 100644 (file)
 #define FN(reg_name, field_name) \
        hubbub1->shifts->field_name, hubbub1->masks->field_name
 
-#ifdef NUM_VMID
-#undef NUM_VMID
-#endif
-#define NUM_VMID 16
-
 static uint32_t convert_and_clamp(
        uint32_t wm_ns,
        uint32_t refclk_mhz,
@@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
 
        dcn21_dchvm_init(hubbub);
 
-       return NUM_VMID;
+       return hubbub1->num_vmid;
 }
 
 bool hubbub21_program_urgent_watermarks(
index d285ba622d6103c76d7c91ccbc0590dd4ad233e0..960a0716dde53a924e3a24eca0f81178f3ab02a5 100644 (file)
@@ -778,21 +778,28 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
 {
        struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
        struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
-       struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
-
-       PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
-       PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
-       PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
-       PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
-       PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
-       PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
-       PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+       cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+       cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+               flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+       cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+       cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+       cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+       cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+       cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
 
        PERF_TRACE();  // TODO: remove after performance is stable.
-       dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+       dc_dmub_srv_cmd_queue(dmcub, &cmd);
        PERF_TRACE();  // TODO: remove after performance is stable.
        dc_dmub_srv_cmd_execute(dmcub);
        PERF_TRACE();  // TODO: remove after performance is stable.
index 4dd634118df2effa75d328eadd7f520dfd02dae7..a5baef7e7a7d681f372ccb1c701b3e5ba2759fd2 100644 (file)
@@ -87,11 +87,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
-       .set_cursor_position = dcn10_set_cursor_position,
-       .set_cursor_attribute = dcn10_set_cursor_attribute,
-       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
-       .optimize_pwr_state = dcn21_optimize_pwr_state,
-       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+       .power_down = dce110_power_down,
+       .set_backlight_level = dce110_set_backlight_level,
+       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -107,8 +105,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
        .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
        .enable_stream_timing = dcn20_enable_stream_timing,
        .edp_backlight_control = dce110_edp_backlight_control,
-       .is_panel_backlight_on = dce110_is_panel_backlight_on,
-       .is_panel_powered_on = dce110_is_panel_powered_on,
        .disable_stream_gating = dcn20_disable_stream_gating,
        .enable_stream_gating = dcn20_enable_stream_gating,
        .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
index e45683ac871a2ba86d898bab5175ad10393db0cd..aa46c35b05a23b29052c22a85890b56e5f8f3643 100644 (file)
@@ -203,29 +203,6 @@ static bool update_cfg_data(
        return true;
 }
 
-void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
-       struct dc_link_settings *link_settings)
-{
-       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-       uint32_t value;
-
-       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
-
-       if (!value && link_settings->lane_count > LANE_COUNT_TWO)
-               link_settings->lane_count = LANE_COUNT_TWO;
-}
-
-bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
-{
-       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-       uint32_t value;
-
-       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
-
-       // if value == 1 alt mode is disabled, otherwise it is enabled
-       return !value;
-}
-
 bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
 {
        struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = {
        .fec_set_ready = enc2_fec_set_ready,
        .fec_is_active = enc2_fec_is_active,
        .get_dig_frontend = dcn10_get_dig_frontend,
-       .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
-       .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
 };
 
 void dcn21_link_encoder_construct(
index a721bb401ef0822cfdedbe384e4c08745bc7f3e1..f00a568350848a644655b78304559c22cc389a1d 100644 (file)
@@ -61,6 +61,7 @@
 #include "dcn21_hubbub.h"
 #include "dcn10/dcn10_resource.h"
 #include "dce110/dce110_resource.h"
+#include "dce/dce_panel_cntl.h"
 
 #include "dcn20/dcn20_dwb.h"
 #include "dcn20/dcn20_mmhubbub.h"
@@ -85,6 +86,7 @@
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
 #include "dce/dmub_psr.h"
+#include "dce/dmub_abm.h"
 
 #define SOC_BOUNDING_BOX_VALID false
 #define DC_LOGGER_INIT(logger)
@@ -803,7 +805,7 @@ static const struct resource_caps res_cap_rn = {
                .num_pll = 5,  // maybe 3 because the last two used for USB-c
                .num_dwb = 1,
                .num_ddc = 5,
-               .num_vmid = 1,
+               .num_vmid = 16,
                .num_dsc = 3,
 };
 
@@ -995,9 +997,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
                pool->base.dp_clock_source = NULL;
        }
 
-
-       if (pool->base.abm != NULL)
-               dce_abm_destroy(&pool->base.abm);
+       if (pool->base.abm != NULL) {
+               if (pool->base.abm->ctx->dc->config.disable_dmcu)
+                       dmub_abm_destroy(&pool->base.abm);
+               else
+                       dce_abm_destroy(&pool->base.abm);
+       }
 
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
@@ -1290,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
                vmid->shifts = &vmid_shifts;
                vmid->masks = &vmid_masks;
        }
+       hubbub->num_vmid = res_cap_rn.num_vmid;
 
        return &hubbub->base;
 }
@@ -1379,7 +1385,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
        struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
        struct clk_limit_table *clk_table = &bw_params->clk_table;
        struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
-       unsigned int i, j, closest_clk_lvl;
+       unsigned int i, closest_clk_lvl;
+       int j;
 
        // Default clock levels are used for diags, which may lead to overclocking.
        if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -1591,6 +1598,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
        link_regs(4, E),
 };
 
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+       { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+       DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
 #define aux_regs(id)\
 [id] = {\
        DCN2_AUX_REG_LIST(id)\
@@ -1676,6 +1695,24 @@ static struct link_encoder *dcn21_link_encoder_create(
 
        return &enc21->enc10.base;
 }
+
+static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+       struct dce_panel_cntl *panel_cntl =
+               kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+       if (!panel_cntl)
+               return NULL;
+
+       dce_panel_cntl_construct(panel_cntl,
+                       init_data,
+                       &panel_cntl_regs[init_data->inst],
+                       &panel_cntl_shift,
+                       &panel_cntl_mask);
+
+       return &panel_cntl->base;
+}
+
 #define CTX ctx
 
 #define REG(reg_name) \
@@ -1694,12 +1731,8 @@ static int dcn21_populate_dml_pipes_from_context(
 {
        uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
        int i;
-       struct resource_context *res_ctx = &context->res_ctx;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
 
-               if (!res_ctx->pipe_ctx[i].stream)
-                       continue;
+       for (i = 0; i < pipe_cnt; i++) {
 
                pipes[i].pipe.src.hostvm = 1;
                pipes[i].pipe.src.gpuvm = 1;
@@ -1724,6 +1757,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
 static struct resource_funcs dcn21_res_pool_funcs = {
        .destroy = dcn21_destroy_resource_pool,
        .link_enc_create = dcn21_link_encoder_create,
+       .panel_cntl_create = dcn21_panel_cntl_create,
        .validate_bandwidth = dcn21_validate_bandwidth,
        .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -1770,7 +1804,6 @@ static bool dcn21_resource_construct(
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.max_cursor_size = 256;
        dc->caps.dmdata_alloc_size = 2048;
-       dc->caps.hw_3d_lut = true;
 
        dc->caps.max_slave_planes = 1;
        dc->caps.post_blend_color_processing = true;
@@ -1779,6 +1812,40 @@ static bool dcn21_resource_construct(
        dc->caps.dmcub_support = true;
        dc->caps.is_apu = true;
 
+       /* Color pipeline capabilities */
+       dc->caps.color.dpp.dcn_arch = 1;
+       dc->caps.color.dpp.input_lut_shared = 0;
+       dc->caps.color.dpp.icsc = 1;
+       dc->caps.color.dpp.dgam_ram = 1;
+       dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+       dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+       dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+       dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.post_csc = 0;
+       dc->caps.color.dpp.gamma_corr = 0;
+
+       dc->caps.color.dpp.hw_3d_lut = 1;
+       dc->caps.color.dpp.ogam_ram = 1;
+       // no OGAM ROM on DCN2
+       dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+       dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+       dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+       dc->caps.color.dpp.ocsc = 0;
+
+       dc->caps.color.mpc.gamut_remap = 0;
+       dc->caps.color.mpc.num_3dluts = 0;
+       dc->caps.color.mpc.shared_3d_lut = 0;
+       dc->caps.color.mpc.ogam_ram = 1;
+       dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+       dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+       dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+       dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+       dc->caps.color.mpc.ocsc = 1;
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
@@ -1831,17 +1898,19 @@ static bool dcn21_resource_construct(
                goto create_fail;
        }
 
-       pool->base.dmcu = dcn21_dmcu_create(ctx,
-                       &dmcu_regs,
-                       &dmcu_shift,
-                       &dmcu_mask);
-       if (pool->base.dmcu == NULL) {
-               dm_error("DC: failed to create dmcu!\n");
-               BREAK_TO_DEBUGGER();
-               goto create_fail;
+       if (!dc->config.disable_dmcu) {
+               pool->base.dmcu = dcn21_dmcu_create(ctx,
+                               &dmcu_regs,
+                               &dmcu_shift,
+                               &dmcu_mask);
+               if (pool->base.dmcu == NULL) {
+                       dm_error("DC: failed to create dmcu!\n");
+                       BREAK_TO_DEBUGGER();
+                       goto create_fail;
+               }
        }
 
-       if (dc->debug.disable_dmcu) {
+       if (dc->config.disable_dmcu) {
                pool->base.psr = dmub_psr_create(ctx);
 
                if (pool->base.psr == NULL) {
@@ -1851,15 +1920,16 @@ static bool dcn21_resource_construct(
                }
        }
 
-       pool->base.abm = dce_abm_create(ctx,
+       if (dc->config.disable_dmcu)
+               pool->base.abm = dmub_abm_create(ctx,
+                       &abm_regs,
+                       &abm_shift,
+                       &abm_mask);
+       else
+               pool->base.abm = dce_abm_create(ctx,
                        &abm_regs,
                        &abm_shift,
                        &abm_mask);
-       if (pool->base.abm == NULL) {
-               dm_error("DC: failed to create abm!\n");
-               BREAK_TO_DEBUGGER();
-               goto create_fail;
-       }
 
        pool->base.pp_smu = dcn21_pp_smu_create(ctx);
 
index 5bbbafacc72038c8e365727bc270220466f9cc81..80170f9721ce949330d72408b81c48575af13101 100644 (file)
@@ -2599,21 +2599,44 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                }
        }
 
+       {
+       float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+       int PlaneWithMinActiveDRAMClockChangeMargin = -1;
+
        mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
        for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
                                < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
                        mode_lib->vba.MinActiveDRAMClockChangeMargin =
                                        mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+                       if (mode_lib->vba.BlendingAndTiming[k] == k) {
+                               PlaneWithMinActiveDRAMClockChangeMargin = k;
+                       } else {
+                               for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+                                       if (mode_lib->vba.BlendingAndTiming[k] == j) {
+                                               PlaneWithMinActiveDRAMClockChangeMargin = j;
+                                       }
+                               }
+                       }
                }
        }
 
        mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
                        mode_lib->vba.MinActiveDRAMClockChangeMargin
                                        + mode_lib->vba.DRAMClockChangeLatency;
+       for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
+                               && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+                               && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+                                               < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+                       SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+                                       mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+               }
+       }
 
        if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
                        mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+               mode_lib->vba.DRAMClockChangeWatermark += 25;
 
                for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                        if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
@@ -2622,13 +2645,17 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                                        mode_lib->vba.MinTTUVBlank[k] += 25;
                        }
                }
-               mode_lib->vba.DRAMClockChangeWatermark += 25;
+
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else if (mode_lib->vba.DummyPStateCheck &&
                        mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else {
-               if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+               if ((mode_lib->vba.SynchronizedVBlank
+                               || mode_lib->vba.NumberOfActivePlanes == 1
+                               || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
+                                               mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
+                                       && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
                        mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
                        for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
                                if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@@ -2640,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
                        mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
                }
        }
+       }
        for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
                for (j = 0; j < 2; j++)
                        mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
index e6617c958bb8bfb26ef690facecec0432ae43fb2..a576eed94d9b0fe5ad8cfecbedaccf09c714945d 100644 (file)
@@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
        double TimeForFetchingRowInVBlankImmediateFlip;
        double ImmediateFlipBW;
        double HostVMInefficiencyFactor;
+       double VRatioClamped;
 
        if (GPUVMEnable == true && HostVMEnable == true) {
                HostVMInefficiencyFactor =
@@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
 
        *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
        *final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+       VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
        if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
                if (GPUVMEnable == true && DCCEnable != true) {
                        min_row_time = dml_min(
-                                       dpte_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / (VRatio / 2));
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
                } else if (GPUVMEnable != true && DCCEnable == true) {
                        min_row_time = dml_min(
-                                       meta_row_height * LineTime / VRatio,
-                                       meta_row_height_chroma * LineTime / (VRatio / 2));
+                                       meta_row_height * LineTime / VRatioClamped,
+                                       meta_row_height_chroma * LineTime / (VRatioClamped / 2));
                } else {
                        min_row_time = dml_min4(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / (VRatio / 2),
-                                       meta_row_height_chroma * LineTime / (VRatio / 2));
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       meta_row_height * LineTime / VRatioClamped,
+                                       dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
+                                       meta_row_height_chroma * LineTime / (VRatioClamped / 2));
                }
        } else {
                if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dpte_row_height * LineTime / VRatio;
+                       min_row_time = dpte_row_height * LineTime / VRatioClamped;
                } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = meta_row_height * LineTime / VRatio;
+                       min_row_time = meta_row_height * LineTime / VRatioClamped;
                } else {
                        min_row_time = dml_min(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio);
+                                       dpte_row_height * LineTime / VRatioClamped,
+                                       meta_row_height * LineTime / VRatioClamped);
                }
        }
 
@@ -5944,7 +5946,7 @@ static void CalculateMetaAndPTETimes(
                                                * PixelPTEReqHeightY[k];
                        }
                        dpte_groups_per_row_luma_ub = dml_ceil(
-                                       dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+                                       (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
                                        1);
                        time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
                                        / PixelClock[k] / dpte_groups_per_row_luma_ub;
@@ -5968,7 +5970,7 @@ static void CalculateMetaAndPTETimes(
                                                        * PixelPTEReqHeightC[k];
                                }
                                dpte_groups_per_row_chroma_ub = dml_ceil(
-                                               dpte_row_width_chroma_ub[k]
+                                               (float) dpte_row_width_chroma_ub[k]
                                                                / dpte_group_width_chroma,
                                                1);
                                time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
index b8ec08e3b7a36da30e6ae22db50153bb27a3c4b7..90a5fefef05b10566fc036e7beffde338005b59f 100644 (file)
@@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params(
        disp_dlg_regs->refcyc_per_pte_group_vblank_l =
                        (unsigned int) (dst_y_per_row_vblank * (double) htotal
                                        * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
-       ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+       if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+                       disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
+               disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
+       else
+               ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
 
        if (dual_plane) {
                disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
                                * (double) htotal * ref_freq_to_pix_freq
                                / (double) dpte_groups_per_row_ub_c);
-               ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+               if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+                               disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
+                       disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
+               else
+                       ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
                                < (unsigned int)dml_pow(2, 13));
        }
 
-       disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+       if (src->dcc)
+               disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
                        (unsigned int) (dst_y_per_row_vblank * (double) htotal
                                        * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+       else
+               disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
        ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
 
        disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
index 687010c17324cc595eb70ae81136fa6ca0af14a7..439ffd04be34c69734b2f05657f6158cc0e1ea64 100644 (file)
@@ -118,9 +118,11 @@ struct _vcs_dpi_soc_bounding_box_st {
        double urgent_latency_adjustment_fabric_clock_component_us;
        double urgent_latency_adjustment_fabric_clock_reference_mhz;
        bool disable_dram_clock_change_vactive_support;
+       bool allow_dram_clock_one_display_vactive;
 };
 
 struct _vcs_dpi_ip_params_st {
+       bool use_min_dcfclk;
        bool gpuvm_enable;
        bool hostvm_enable;
        unsigned int gpuvm_max_page_table_levels;
index 6b525c52124c6aa3b93dd3ee3cb0665727a9f2ac..b19988f547218a0aaaa5b8ffbb72b032c073a6a9 100644 (file)
@@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
        mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
        mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
                        mode_lib->vba.DummyPStateCheck;
+       mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
 
        mode_lib->vba.Downspreading = soc->downspread_percent;
        mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes;   // new!
@@ -280,6 +281,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
        ip_params_st *ip = &mode_lib->vba.ip;
 
        // IP Parameters
+       mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
        mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
        mode_lib->vba.MaxNumOTG = ip->max_num_otg;
        mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
index 3a734171f083407f105dab8a924f188a24952b7d..3f559e725ab148b24f444fff66b70b0f99457a81 100644 (file)
@@ -896,6 +896,8 @@ struct vba_vars_st {
        bool dummystring[DC__NUM_DPP__MAX];
        double BPP;
        enum odm_combine_policy ODMCombinePolicy;
+       bool UseMinimumRequiredDCFCLK;
+       bool AllowDramClockChangeOneDisplayVactive;
 };
 
 bool CalculateMinAndMaxPrefetchMode(
index 87d682d25278ab4a8f5f41e9a586112299006f3c..0ea6662a1563215e5aadc309b0ed02467f711db5 100644 (file)
@@ -129,7 +129,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
 static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
 {
        switch (dpcd_throughput) {
-       case DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED:
+       case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
                *throughput = 0;
                break;
        case DP_DSC_THROUGHPUT_MODE_0_170:
index 6f730b5bfe425cb06c220d79acdc4a17769e9de8..5e384a8a83dc21713d1a8bde89b779826713fca4 100644 (file)
@@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = {
        .process_transaction = dp_11_process_transaction
 };
 
+static const struct protection_properties *get_protection_properties_by_signal(
+       struct dc_link *link,
+       enum signal_type st,
+       enum hdcp_version version)
+{
+       switch (version) {
+       case HDCP_VERSION_14:
+               switch (st) {
+               case SIGNAL_TYPE_DVI_SINGLE_LINK:
+               case SIGNAL_TYPE_DVI_DUAL_LINK:
+               case SIGNAL_TYPE_HDMI_TYPE_A:
+                       return &hdmi_14_protection;
+               case SIGNAL_TYPE_DISPLAY_PORT:
+                       if (link &&
+                               (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+                               link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
+                               return &non_supported_protection;
+                       }
+                       return &dp_11_protection;
+               case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               case SIGNAL_TYPE_EDP:
+                       return &dp_11_protection;
+               default:
+                       return &non_supported_protection;
+               }
+               break;
+       case HDCP_VERSION_22:
+               switch (st) {
+               case SIGNAL_TYPE_DVI_SINGLE_LINK:
+               case SIGNAL_TYPE_DVI_DUAL_LINK:
+               case SIGNAL_TYPE_HDMI_TYPE_A:
+                       return &hdmi_14_protection; //todo version2.2
+               case SIGNAL_TYPE_DISPLAY_PORT:
+               case SIGNAL_TYPE_DISPLAY_PORT_MST:
+               case SIGNAL_TYPE_EDP:
+                       return &dp_11_protection;  //todo version2.2
+               default:
+                       return &non_supported_protection;
+               }
+               break;
+       default:
+               return &non_supported_protection;
+       }
+}
+
+enum hdcp_message_status dc_process_hdcp_msg(
+       enum signal_type signal,
+       struct dc_link *link,
+       struct hdcp_protection_message *message_info)
+{
+       enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
+       uint32_t i = 0;
+
+       const struct protection_properties *protection_props;
+
+       if (!message_info)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
+               message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       protection_props =
+               get_protection_properties_by_signal(
+                       link,
+                       signal,
+                       message_info->version);
+
+       if (!protection_props->supported)
+               return HDCP_MESSAGE_UNSUPPORTED;
+
+       if (protection_props->process_transaction(
+               link,
+               message_info)) {
+               status = HDCP_MESSAGE_SUCCESS;
+       } else {
+               for (i = 0; i < message_info->max_retries; i++) {
+                       if (protection_props->process_transaction(
+                                               link,
+                                               message_info)) {
+                               status = HDCP_MESSAGE_SUCCESS;
+                               break;
+                       }
+               }
+       }
+
+       return status;
+}
+
index d523fc9547e705d66bf5322384f3a91e1eb78045..c7fd702a4a87db0e91141e8c602d9f303d66a246 100644 (file)
@@ -38,6 +38,7 @@
 #endif
 #include "dwb.h"
 #include "mcif_wb.h"
+#include "panel_cntl.h"
 
 #define MAX_CLOCK_SOURCES 7
 
@@ -92,6 +93,8 @@ struct clk_bw_params;
 struct resource_funcs {
        void (*destroy)(struct resource_pool **pool);
        void (*link_init)(struct dc_link *link);
+       struct panel_cntl*(*panel_cntl_create)(
+               const struct panel_cntl_init_data *panel_cntl_init_data);
        struct link_encoder *(*link_enc_create)(
                        const struct encoder_init_data *init);
        bool (*validate_bandwidth)(
index d607b3191954fc57b2598c8962d3980d1ed15a00..e8ce8c85adf1c48cf1aaf7683452f2482e8d9a87 100644 (file)
 
 #include "dm_services_types.h"
 
-struct abm_backlight_registers {
-       unsigned int BL_PWM_CNTL;
-       unsigned int BL_PWM_CNTL2;
-       unsigned int BL_PWM_PERIOD_CNTL;
-       unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
 struct abm {
        struct dc_context *ctx;
        const struct abm_funcs *funcs;
        bool dmcu_is_running;
-       /* registers setting needs to be saved and restored at InitBacklight */
-       struct abm_backlight_registers stored_backlight_registers;
 };
 
 struct abm_funcs {
-       void (*abm_init)(struct abm *abm);
+       void (*abm_init)(struct abm *abm, uint32_t back_light);
        bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
-       bool (*set_abm_immediate_disable)(struct abm *abm);
-       bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
-       bool (*init_backlight)(struct abm *abm);
+       bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
+       bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
 
        /* backlight_pwm_u16_16 is unsigned 32 bit,
         * 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@@ -56,10 +46,13 @@ struct abm_funcs {
                        unsigned int backlight_pwm_u16_16,
                        unsigned int frame_ramp,
                        unsigned int controller_id,
-                       bool use_smooth_brightness);
+                       unsigned int panel_inst);
 
        unsigned int (*get_current_backlight)(struct abm *abm);
        unsigned int (*get_target_backlight)(struct abm *abm);
+       bool (*init_abm_config)(struct abm *abm,
+                       const char *src,
+                       unsigned int bytes);
 };
 
 #endif
index f5dd0cc73c63a9116b03f996a126649891ba96f6..47a566d82d6e4b79890a14ee517ba5c0b5a47f51 100644 (file)
@@ -144,6 +144,8 @@ struct hubbub_funcs {
        void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
 
        void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+
+       void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
 };
 
 struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
new file mode 100644 (file)
index 0000000..f9ab5ab
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * panel_cntl.h
+ *
+ *  Created on: Oct 6, 2015
+ *      Author: yonsun
+ */
+
+#ifndef DC_PANEL_CNTL_H_
+#define DC_PANEL_CNTL_H_
+
+#include "dc_types.h"
+
+#define MAX_BACKLIGHT_LEVEL 0xFFFF
+
+struct panel_cntl_backlight_registers {
+       unsigned int BL_PWM_CNTL;
+       unsigned int BL_PWM_CNTL2;
+       unsigned int BL_PWM_PERIOD_CNTL;
+       unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+struct panel_cntl_funcs {
+       void (*destroy)(struct panel_cntl **panel_cntl);
+       uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
+       bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
+       bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
+       void (*store_backlight_level)(struct panel_cntl *panel_cntl);
+       void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
+                       uint32_t backlight_pwm_u16_16);
+};
+
+struct panel_cntl_init_data {
+       struct dc_context *ctx;
+       uint32_t inst;
+};
+
+struct panel_cntl {
+       const struct panel_cntl_funcs *funcs;
+       struct dc_context *ctx;
+       uint32_t inst;
+       /* registers setting needs to be saved and restored at InitBacklight */
+       struct panel_cntl_backlight_registers stored_backlight_registers;
+};
+
+#endif /* DC_PANEL_CNTL_H_ */
index e5e7d94026fc6b7ed0c796ff508d7f601cbbcd32..f803191e3134ad5db9ffc2282b7934c0156a77fe 100644 (file)
@@ -117,6 +117,9 @@ struct crc_params {
 
        enum crc_selection selection;
 
+       uint8_t dsc_mode;
+       uint8_t odm_mode;
+
        bool continuous_mode;
        bool enable;
 };
index fecc80c47c267eb015572bf6df6a3708088e73ef..2947d1b15512910658a06a07792414bfab21fc25 100644 (file)
@@ -173,6 +173,8 @@ struct scaler_data {
        struct scaling_taps taps;
        struct rect viewport;
        struct rect viewport_c;
+       struct rect viewport_unadjusted;
+       struct rect viewport_c_unadjusted;
        struct rect recout;
        struct scaling_ratios ratios;
        struct scl_inits inits;
index 08307f3796e3f78dac581f54d9d89157905a7c39..8e72f077e5520e590074f13acf7abcbdf25184c7 100644 (file)
@@ -75,9 +75,13 @@ struct hw_sequencer_funcs {
        void (*wait_for_mpcc_disconnect)(struct dc *dc,
                        struct resource_pool *res_pool,
                        struct pipe_ctx *pipe_ctx);
+       void (*edp_backlight_control)(
+                       struct dc_link *link,
+                       bool enable);
        void (*program_triplebuffer)(const struct dc *dc,
                struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
        void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+       void (*power_down)(struct dc *dc);
 
        /* Pipe Lock Related */
        void (*pipe_control_lock)(struct dc *dc,
@@ -193,6 +197,12 @@ struct hw_sequencer_funcs {
                        unsigned int bufSize, unsigned int mask);
        void (*clear_status_bits)(struct dc *dc, unsigned int mask);
 
+       bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
+                       uint32_t backlight_pwm_u16_16,
+                       uint32_t frame_ramp);
+
+       void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+
 
 };
 
index 52a26e6be066b4ff978237d0feb76fb1c33e8e12..36e906bb6bfc5eded324c0a0c5adb2ecb4d95722 100644 (file)
@@ -100,8 +100,6 @@ struct hwseq_private_funcs {
                        struct dc *dc);
        void (*edp_backlight_control)(struct dc_link *link,
                        bool enable);
-       bool (*is_panel_backlight_on)(struct dc_link *link);
-       bool (*is_panel_powered_on)(struct dc_link *link);
        void (*setup_vupdate_interrupt)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx);
        bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
index ca4c36c0c9bcf9b0db7b1e65f93c1ed4399d28fc..a9be495af922e0b4eae3a5f6ca155c6c29d74a42 100644 (file)
@@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
                const struct resource_pool *pool,
                const struct pipe_ctx *primary_pipe);
 
-bool resource_is_stream_unchanged(
-       struct dc_state *old_context, struct dc_stream_state *stream);
-
 bool resource_validate_attach_surfaces(
                const struct dc_validation_set set[],
                int set_count,
@@ -180,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
 void get_audio_check(struct audio_info *aud_modes,
        struct audio_check *aud_chk);
 
+int get_num_mpc_splits(struct pipe_ctx *pipe);
+
 int get_num_odm_splits(struct pipe_ctx *pipe);
 
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
index 3464b2d5b89afa1d90dec9bb83ead082c1ba9986..348e9a600a728961c14d8ee02d392e63c9f38182 100644 (file)
@@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
        *enc = NULL;
 }
 
+static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
+               struct dc_link_settings *link_settings)
+{
+       /* Set Default link settings */
+       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+                               LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+       *link_settings = max_link_cap;
+}
 
 static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
        .validate_output_with_stream =
@@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
        .enable_dp_output = virtual_link_encoder_enable_dp_output,
        .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
        .disable_output = virtual_link_encoder_disable_output,
+       .get_max_link_cap = virtual_link_encoder_get_max_link_cap,
        .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
        .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
        .update_mst_stream_allocation_table =
similarity index 97%
rename from drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
rename to drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c2671f2616c840dbbead13833126aa6d77f989d7..26d94eb5ab58e0de1076808d84557847fb527cdf 100644 (file)
  * other component within DAL.
  */
 
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-#include "dmub_gpint_cmd.h"
-#include "dmub_rb.h"
+#include "inc/dmub_types.h"
+#include "inc/dmub_cmd.h"
+#include "inc/dmub_gpint_cmd.h"
+#include "inc/dmub_cmd_dal.h"
+#include "inc/dmub_rb.h"
 
 #if defined(__cplusplus)
 extern "C" {
@@ -75,7 +76,6 @@ extern "C" {
 
 /* Forward declarations */
 struct dmub_srv;
-struct dmub_cmd_header;
 struct dmub_srv_common_regs;
 
 /* enum dmub_status - return code for dmcub functions */
@@ -151,6 +151,7 @@ struct dmub_srv_region_params {
        uint32_t inst_const_size;
        uint32_t bss_data_size;
        uint32_t vbios_size;
+       const uint8_t *fw_inst_const;
        const uint8_t *fw_bss_data;
 };
 
@@ -457,7 +458,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
  *   DMUB_STATUS_INVALID - unspecified error
  */
 enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
-                                   const struct dmub_cmd_header *cmd);
+                                   const union dmub_rb_cmd *cmd);
 
 /**
  * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
@@ -565,6 +566,16 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
 enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
                                             uint32_t *response);
 
+/**
+ * dmub_flush_buffer_mem() - Read back entire frame buffer region.
+ * This ensures that the write from x86 has been flushed and will not
+ * hang the DMCUB.
+ * @fb: frame buffer to flush
+ *
+ * Can be called after software initialization.
+ */
+void dmub_flush_buffer_mem(const struct dmub_fb *fb);
+
 #if defined(__cplusplus)
 }
 #endif
index 10b5fa9d25884bdc0e00b7b7a42660048966f3f2..599bf2055bcb531e7a0d8f84c4bb4941731940aa 100644 (file)
@@ -228,6 +228,7 @@ struct dmub_cmd_psr_copy_settings_data {
        uint8_t smu_optimizations_en;
        uint8_t frame_delay;
        uint8_t frame_cap_ind;
+       struct dmub_psr_debug_flags debug;
 };
 
 struct dmub_rb_cmd_psr_copy_settings {
@@ -260,6 +261,8 @@ struct dmub_rb_cmd_psr_set_version {
 struct dmub_cmd_abm_set_pipe_data {
        uint32_t ramping_boundary;
        uint32_t otg_inst;
+       uint32_t panel_inst;
+       uint32_t set_pipe_option;
 };
 
 struct dmub_rb_cmd_abm_set_pipe {
@@ -303,6 +306,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac {
        struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
 };
 
+struct dmub_cmd_abm_init_config_data {
+       union dmub_addr src;
+       uint16_t bytes;
+};
+
+struct dmub_rb_cmd_abm_init_config {
+       struct dmub_cmd_header header;
+       struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
 union dmub_rb_cmd {
        struct dmub_rb_cmd_read_modify_write read_modify_write;
        struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
@@ -324,6 +337,7 @@ union dmub_rb_cmd {
        struct dmub_rb_cmd_abm_set_level abm_set_level;
        struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
        struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
+       struct dmub_rb_cmd_abm_init_config abm_init_config;
 };
 
 #pragma pack(pop)
index d37535d219285c9e032f76c630089caaa2837148..e42de9ded275e31d2cd608f3af3afc5725c21986 100644 (file)
  */
 
 enum dmub_cmd_psr_type {
-       DMUB_CMD__PSR_SET_VERSION       = 0,
-       DMUB_CMD__PSR_COPY_SETTINGS     = 1,
-       DMUB_CMD__PSR_ENABLE            = 2,
-       DMUB_CMD__PSR_DISABLE           = 3,
-       DMUB_CMD__PSR_SET_LEVEL         = 4,
+       DMUB_CMD__PSR_SET_VERSION               = 0,
+       DMUB_CMD__PSR_COPY_SETTINGS             = 1,
+       DMUB_CMD__PSR_ENABLE                    = 2,
+       DMUB_CMD__PSR_DISABLE                   = 3,
+       DMUB_CMD__PSR_SET_LEVEL                 = 4,
 };
 
 enum psr_version {
-       PSR_VERSION_1                   = 0x10, // PSR Version 1
-       PSR_VERSION_2                   = 0x20, // PSR Version 2, includes selective update
-       PSR_VERSION_2_1                 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+       PSR_VERSION_1                           = 0,
+       PSR_VERSION_UNSUPPORTED                 = 0xFFFFFFFF,
 };
 
 enum dmub_cmd_abm_type {
index df875fdd2ab07a2d99342f167bd83292b63bf7d5..2ae48c18bb5b94fffabb00e9d26e2bd9240aaa90 100644 (file)
@@ -33,8 +33,6 @@
 extern "C" {
 #endif
 
-struct dmub_cmd_header;
-
 struct dmub_rb_init_params {
        void *ctx;
        void *base_address;
@@ -71,7 +69,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
 }
 
 static inline bool dmub_rb_push_front(struct dmub_rb *rb,
-                                     const struct dmub_cmd_header *cmd)
+                                     const union dmub_rb_cmd *cmd)
 {
        uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
        const uint64_t *src = (const uint64_t *)cmd;
@@ -93,7 +91,7 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
 }
 
 static inline bool dmub_rb_front(struct dmub_rb *rb,
-                                struct dmub_cmd_header *cmd)
+                                union dmub_rb_cmd  *cmd)
 {
        uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
 
index 41d524b0db2f3dbac64ab45d4f17c7e17486d62b..bed5b023a3967ac95a6eb575b5c2a308eb38e3ee 100644 (file)
@@ -49,6 +49,12 @@ extern "C" {
 #define dmub_udelay(microseconds) udelay(microseconds)
 #endif
 
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
 union dmub_addr {
        struct {
                uint32_t low_part;
@@ -57,6 +63,11 @@ union dmub_addr {
        uint64_t quad_part;
 };
 
+struct dmub_psr_debug_flags {
+       uint8_t visual_confirm : 1;
+       uint8_t reserved : 7;
+};
+
 #if defined(__cplusplus)
 }
 #endif
index 63bb9e2c81de2c91c8b9e37037f9499329c33137..edc73d6d7ba2848cb8a779a292a86dea8af9df23 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_reg.h"
 #include "dmub_dcn20.h"
 
@@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
 
        dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
 
-       dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
-       REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
-       REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
-       REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
-       REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
-                 DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
-                 DMCUB_REGION3_CW2_ENABLE, 1);
+       if (cw2->region.base != cw2->region.top) {
+               dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset,
+                                         &offset);
+
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+               REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+               REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+                         DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+                         DMCUB_REGION3_CW2_ENABLE, 1);
+       } else {
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+               REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+       }
 
        dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
 
index 5bed9fcd6b5cc06f76f8fb2827de3a360b8aab3e..e8f488232e347582bdb33604403df6e3f878988b 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_reg.h"
 #include "dmub_dcn21.h"
 
index 4094eca212f0b11b8aa8e8a5a765c6d4bf6a94c1..ca0c8a54b635e49214a9326dd60af4776f6bdac1 100644 (file)
@@ -24,7 +24,7 @@
  */
 
 #include "dmub_reg.h"
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 
 struct dmub_reg_value_masks {
        uint32_t value;
index ce32cc7933c40777f185b08063b4b47cd5a892b1..0e3751d94cb09a937f91de74aadf1a644a45aff6 100644 (file)
@@ -23,7 +23,7 @@
  *
  */
 
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
 #include "dmub_dcn20.h"
 #include "dmub_dcn21.h"
 #include "dmub_fw_meta.h"
@@ -70,7 +70,7 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
        return (val + factor - 1) / factor * factor;
 }
 
-static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+void dmub_flush_buffer_mem(const struct dmub_fb *fb)
 {
        const uint8_t *base = (const uint8_t *)fb->cpu_addr;
        uint8_t buf[64];
@@ -91,18 +91,32 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
 }
 
 static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
 {
        const union dmub_fw_meta *meta;
+       const uint8_t *blob = NULL;
+       uint32_t blob_size = 0;
+       uint32_t meta_offset = 0;
+
+       if (params->fw_bss_data) {
+               /* Legacy metadata region. */
+               blob = params->fw_bss_data;
+               blob_size = params->bss_data_size;
+               meta_offset = DMUB_FW_META_OFFSET;
+       } else if (params->fw_inst_const) {
+               /* Combined metadata region. */
+               blob = params->fw_inst_const;
+               blob_size = params->inst_const_size;
+               meta_offset = 0;
+       }
 
-       if (fw_bss_data == NULL)
+       if (!blob || !blob_size)
                return NULL;
 
-       if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+       if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
                return NULL;
 
-       meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
-                                           DMUB_FW_META_OFFSET -
+       meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
                                            sizeof(union dmub_fw_meta));
 
        if (meta->info.magic_value != DMUB_FW_META_MAGIC)
@@ -247,8 +261,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
        mail->base = dmub_align(bios->top, 256);
        mail->top = mail->base + DMUB_MAILBOX_SIZE;
 
-       fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
-                                       params->bss_data_size);
+       fw_info = dmub_get_fw_meta_info(params);
 
        if (fw_info) {
                fw_state_size = fw_info->fw_region_size;
@@ -449,7 +462,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
 }
 
 enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
-                                   const struct dmub_cmd_header *cmd)
+                                   const union dmub_rb_cmd *cmd)
 {
        if (!dmub->hw_init)
                return DMUB_STATUS_INVALID;
index f31e6befc8d68e260eb55acfa4d6e206f97f043e..42229b4effdce756fd5454fcb0c1d12797d75f09 100644 (file)
@@ -83,6 +83,12 @@ enum hdcp_link {
        HDCP_LINK_SECONDARY
 };
 
+enum hdcp_message_status {
+       HDCP_MESSAGE_SUCCESS,
+       HDCP_MESSAGE_FAILURE,
+       HDCP_MESSAGE_UNSUPPORTED
+};
+
 struct hdcp_protection_message {
        enum hdcp_version version;
        /* relevant only for DVI */
@@ -91,6 +97,7 @@ struct hdcp_protection_message {
        uint32_t length;
        uint8_t max_retries;
        uint8_t *data;
+       enum hdcp_message_status status;
 };
 
 #endif
index 6e008de25629be9cf18432b7728969c0ce5ce963..02c23b04d34be3e5c1383ecf7a9454fd672d4423 100644 (file)
@@ -40,8 +40,6 @@ struct dc_state;
  *
  */
 
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
-
 void pre_surface_trace(
                struct dc *dc,
                const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
 #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
                do { \
                        (void)(link); \
-                       dc_conn_log_hex_linux(hex_data, hex_len); \
                        DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
                } while (0)
 
 #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
                do { \
                        (void)(link); \
-                       dc_conn_log_hex_linux(hex_data, hex_len); \
                        DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
                } while (0)
 
index cac09d500fda938daa5899f44f0bdc87a2725ea5..9431b48aecb48687178d5c66cd8e0bbd8327ddc7 100644 (file)
@@ -1782,7 +1782,8 @@ rgb_user_alloc_fail:
        return ret;
 }
 
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+               struct dc_transfer_func *input_tf,
                const struct dc_gamma *ramp, bool mapUserRamp)
 {
        struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
@@ -1801,11 +1802,29 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
        /* we can use hardcoded curve for plain SRGB TF
         * If linear, it's bypass if on user ramp
         */
-       if (input_tf->type == TF_TYPE_PREDEFINED &&
-                       (input_tf->tf == TRANSFER_FUNCTION_SRGB ||
-                                       input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
-                                       !mapUserRamp)
-               return true;
+       if (input_tf->type == TF_TYPE_PREDEFINED) {
+               if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
+                               input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
+                               !mapUserRamp)
+                       return true;
+
+               if (dc_caps != NULL &&
+                       dc_caps->dpp.dcn_arch == 1) {
+
+                       if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
+                                       dc_caps->dpp.dgam_rom_caps.pq == 1)
+                               return true;
+
+                       if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+                                       dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
+                               return true;
+
+                       // HLG OOTF not accounted for
+                       if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
+                                       dc_caps->dpp.dgam_rom_caps.hlg == 1)
+                               return true;
+               }
+       }
 
        input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
@@ -1902,7 +1921,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 
 
-       if (ramp->type == GAMMA_CUSTOM)
+       if (ramp && ramp->type == GAMMA_CUSTOM)
                apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
 
        ret = true;
index 9994817a9a03267d79f6efb714c4b883819e88fb..7f56226ba77a91904ae3b64224098a868eb4a0a3 100644 (file)
@@ -30,6 +30,7 @@ struct dc_transfer_func;
 struct dc_gamma;
 struct dc_transfer_func_distributed_points;
 struct dc_rgb_fixed;
+struct dc_color_caps;
 enum dc_transfer_func_predefined;
 
 /* For SetRegamma ADL interface support
@@ -100,7 +101,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
                const struct freesync_hdr_tf_params *fs_params);
 
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+               struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp);
 
 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
index c33454a9e0b4dc88a3e6bbc36a2946db0febcefd..eb7421e83b8658c0352fbb08d91ee25b0e90f8d7 100644 (file)
@@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
                return true;
        } else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
                        in_vrr->fixed.target_refresh_in_uhz !=
-                                       in_config->min_refresh_in_uhz) {
+                                       in_config->fixed_refresh_in_uhz) {
                return true;
        } else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
                return true;
@@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
        return false;
 }
 
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
                struct dc_info_packet *infopacket)
 {
        /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
@@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
                        vrr->state == VRR_STATE_ACTIVE_FIXED)
                infopacket->sb[6] |= 0x04;
 
+       // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
        /* PB7 = FreeSync Minimum refresh rate (Hz) */
-       infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+                       vrr->state == VRR_STATE_ACTIVE_FIXED) {
+               infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+       } else {
+               infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       }
 
        /* PB8 = FreeSync Maximum refresh rate (Hz)
         * Note: We should never go above the field rate of the mode timing set.
         */
        infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
 
+       //FreeSync HDR
+       infopacket->sb[9] = 0;
+       infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
+               struct dc_info_packet *infopacket)
+{
+       /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+       infopacket->sb[1] = 0x1A;
+
+       /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+       infopacket->sb[2] = 0x00;
+
+       /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+       infopacket->sb[3] = 0x00;
+
+       /* PB4 = Reserved */
+
+       /* PB5 = Reserved */
+
+       /* PB6 = [Bits 7:3 = Reserved] */
+
+       /* PB6 = [Bit 0 = FreeSync Supported] */
+       if (vrr->state != VRR_STATE_UNSUPPORTED)
+               infopacket->sb[6] |= 0x01;
+
+       /* PB6 = [Bit 1 = FreeSync Enabled] */
+       if (vrr->state != VRR_STATE_DISABLED &&
+                       vrr->state != VRR_STATE_UNSUPPORTED)
+               infopacket->sb[6] |= 0x02;
+
+       /* PB6 = [Bit 2 = FreeSync Active] */
+       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+                       vrr->state == VRR_STATE_ACTIVE_FIXED)
+               infopacket->sb[6] |= 0x04;
+
+       if (vrr->state == VRR_STATE_ACTIVE_FIXED) {
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+       } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       } else {
+               // Non-fs case, program nominal range
+               /* PB7 = FreeSync Minimum refresh rate (Hz) */
+               infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+               /* PB8 = FreeSync Maximum refresh rate (Hz) */
+               infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+       }
 
        //FreeSync HDR
        infopacket->sb[9] = 0;
@@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket);
        build_vrr_infopacket_checksum(&payload_size, infopacket);
 
        infopacket->valid = true;
@@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket);
+
+       build_vrr_infopacket_fs2_data(app_tf, infopacket);
+
+       build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+       infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v3(enum signal_type signal,
+               const struct mod_vrr_params *vrr,
+               enum color_transfer_func app_tf,
+               struct dc_info_packet *infopacket)
+{
+       unsigned int payload_size = 0;
+
+       build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+       build_vrr_infopacket_data_v3(vrr, infopacket);
 
        build_vrr_infopacket_fs2_data(app_tf, infopacket);
 
@@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
                return;
 
        switch (packet_type) {
-       case PACKET_TYPE_FS2:
+       case PACKET_TYPE_FS_V3:
+               build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+               break;
+       case PACKET_TYPE_FS_V2:
                build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
                break;
        case PACKET_TYPE_VRR:
-       case PACKET_TYPE_FS1:
+       case PACKET_TYPE_FS_V1:
        default:
                build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
        }
@@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                                calc_duration_in_us_from_refresh_in_uhz(
                                                (unsigned int)max_refresh_in_uhz);
 
+               if (in_config->state == VRR_STATE_ACTIVE_FIXED)
+                       in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
+               else
+                       in_out_vrr->fixed_refresh_in_uhz = 0;
+
                refresh_range = in_out_vrr->max_refresh_in_uhz -
                                in_out_vrr->min_refresh_in_uhz;
 
@@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                                in_out_vrr->min_refresh_in_uhz);
        } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
                in_out_vrr->fixed.target_refresh_in_uhz =
-                               in_out_vrr->min_refresh_in_uhz;
+                               in_out_vrr->fixed_refresh_in_uhz;
                if (in_out_vrr->fixed.ramping_active &&
                                in_out_vrr->fixed.fixed_active) {
                        /* Do not update vtotals if ramping is already active
index cc1d3f470b99f0a8836c44252bbca3eecc9ffd72..e9fbd94f8635e7654c7a701f604c8ceecdfd5c15 100644 (file)
@@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
        /* add display to connection */
        hdcp->connection.link = *link;
        *display_container = *display;
-       status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+       status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
        if (status != MOD_HDCP_STATUS_SUCCESS)
                goto out;
 
@@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
        status = mod_hdcp_remove_display_from_topology(hdcp, index);
        if (status != MOD_HDCP_STATUS_SUCCESS)
                goto out;
-       display->state = MOD_HDCP_DISPLAY_INACTIVE;
+       memset(display, 0, sizeof(struct mod_hdcp_display));
 
        /* request authentication when connection is not reset */
        if (current_state(hdcp) != HDCP_UNINITIALIZED)
index 5cb4546be0ef04c7e8a66f1ed33f7c2e4f9641e8..b0cefed2eb02660b7f43d5ad7b83a9d2edbbb453 100644 (file)
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
 
 /* psp functions */
 enum mod_hdcp_status mod_hdcp_add_display_to_topology(
-               struct mod_hdcp *hdcp, uint8_t index);
+               struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
 enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
                struct mod_hdcp *hdcp, uint8_t index);
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
                struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
                struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status);
 
 /* ddc functions */
 enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
@@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
        return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
 }
 
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
-       return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
 static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
 {
        return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
 
 static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
 {
-       uint8_t added_count = 0;
+       uint8_t active_count = 0;
        uint8_t i;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
                if (is_display_active(&hdcp->displays[i]))
-                       added_count++;
-       return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
-       uint8_t added_count = 0;
-       uint8_t i;
-
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_added(&hdcp->displays[i]))
-                       added_count++;
-       return added_count;
+                       active_count++;
+       return active_count;
 }
 
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
                struct mod_hdcp *hdcp)
 {
        uint8_t i;
        struct mod_hdcp_display *display = NULL;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_added(&hdcp->displays[i])) {
+               if (is_display_active(&hdcp->displays[i])) {
                        display = &hdcp->displays[i];
                        break;
                }
index 37c8c05497d66487c697712a58bf708deb001e47..f244b72e74e06969a47abab0096948795fa3bc38 100644 (file)
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* device count must be greater than or equal to tracked hdcp displays */
-       return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+       return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
                        MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
                        MOD_HDCP_STATUS_SUCCESS;
 }
index 491c00f48026e285d6c9a89c68fb17756ed51657..549c113abcf7fa438de7fb19d70b8e06e07231f6 100644 (file)
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* device count must be greater than or equal to tracked hdcp displays */
-       return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+       return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
                        MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
                        MOD_HDCP_STATUS_SUCCESS;
 }
index 44956f9ba17844bf19250246bee0fb3686394f85..fb6a19d020f9556d6102d0f331b92c99e4d8f763 100644 (file)
@@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status)
                return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
                return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
-       case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
-               return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+       case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
+               return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
                return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
@@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status)
                return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
        case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
                return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
-       case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
-               return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+       case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
+               return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
        case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
                return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
        case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
index c2929815c3ee977d685df8ed9e287b1175484cf4..fb1161dd7ea809d2c2e69201589a485233276188 100644 (file)
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
        struct ta_dtm_shared_memory *dtm_cmd;
        struct mod_hdcp_display *display =
                        get_active_display_at_index(hdcp, index);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
-       if (!display || !is_display_added(display))
+       if (!display || !is_display_active(display))
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->dtm_context.mutex);
+
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
        dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
 
        psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       } else {
+               display->state = MOD_HDCP_DISPLAY_ACTIVE;
+               HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+       }
 
-       display->state = MOD_HDCP_DISPLAY_ACTIVE;
-       HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->dtm_context.mutex);
+       return status;
 }
 enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
-                                                     uint8_t index)
+                                              struct mod_hdcp_display *display)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_dtm_shared_memory *dtm_cmd;
-       struct mod_hdcp_display *display =
-                       get_active_display_at_index(hdcp, index);
        struct mod_hdcp_link *link = &hdcp->connection.link;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->dtm_context.dtm_initialized) {
                DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+               display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       if (!display || is_display_added(display))
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
        dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
+       mutex_lock(&psp->dtm_context.mutex);
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
        dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
 
        psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
-       display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-       HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+       if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+               display->state = MOD_HDCP_DISPLAY_INACTIVE;
+               status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+       } else {
+               HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->dtm_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 {
 
        struct psp_context *psp = hdcp->config.psp.handle;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->hdcp_context.hdcp_initialized) {
                DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
@@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
        }
 
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+       mutex_lock(&psp->hdcp_context.mutex);
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
@@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 
        hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
-
-       hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
-       memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
-               sizeof(hdcp->auth.msg.hdcp1.aksv));
-       memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
-               sizeof(hdcp->auth.msg.hdcp1.an));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+       } else {
+               hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+               memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+                      sizeof(hdcp->auth.msg.hdcp1.aksv));
+               memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+                      sizeof(hdcp->auth.msg.hdcp1.an));
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
@@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        uint8_t i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
-
-       HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_encryption_enabled(
-                               &hdcp->displays[i])) {
-                       hdcp->displays[i].state =
-                                       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-                       HDCP_HDCP1_DISABLED_TRACE(hdcp,
-                                       hdcp->displays[i].index);
-               }
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+       } else {
+               HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+               for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+                       if (is_display_encryption_enabled(&hdcp->displays[i])) {
+                               hdcp->displays[i].state =
+                                                       MOD_HDCP_DISPLAY_ACTIVE;
+                               HDCP_HDCP1_DISABLED_TRACE(
+                                       hdcp, hdcp->displays[i].index);
+                       }
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
-       if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+       } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
            TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
                /* needs second part of authentication */
                hdcp->connection.is_repeater = 1;
@@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
        } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
                   TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
                hdcp->connection.is_hdcp1_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
+               status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
        } else
-               return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
+               status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
-
-       if (!is_dp_mst_hdcp(hdcp)) {
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
+       } else if (!is_dp_mst_hdcp(hdcp)) {
                display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
        }
-       return MOD_HDCP_STATUS_SUCCESS;
+
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
@@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
                status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
        }
 
+       mutex_unlock(&psp->hdcp_context.mutex);
        return status;
 }
 
@@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        int i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 
-               if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-                   hdcp->displays[i].adjust.disable)
-                       continue;
+               if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+                               continue;
 
                memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
 
                psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-               if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-                       return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+               if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+                       status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+                       break;
+               }
 
                hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
        }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+                       hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
+               status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
 
-       return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
-                      ? MOD_HDCP_STATUS_SUCCESS
-                      : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
@@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
 
        if (!psp->hdcp_context.hdcp_initialized) {
                DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
        if (!display)
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->hdcp_context.mutex);
+
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
        hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
        if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
 
-       hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+       else
+               hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
@@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
        uint8_t i = 0;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
-
-       HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
-       for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-               if (is_display_encryption_enabled(
-                               &hdcp->displays[i])) {
-                       hdcp->displays[i].state =
-                                       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-                       HDCP_HDCP2_DISABLED_TRACE(hdcp,
-                                       hdcp->displays[i].index);
-               }
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+       } else {
+               HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+               for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+                       if (is_display_encryption_enabled(&hdcp->displays[i])) {
+                               hdcp->displays[i].state =
+                                                       MOD_HDCP_DISPLAY_ACTIVE;
+                               HDCP_HDCP2_DISABLED_TRACE(
+                                       hdcp, hdcp->displays[i].index);
+                       }
+       }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
@@ -437,7 +470,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
-
-       memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ake_init));
+               status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+       else
+               memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_init));
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
@@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
-
-       memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
-              &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
-              sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
-
-       if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
-               hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
-               hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
-               return MOD_HDCP_STATUS_SUCCESS;
-       } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
-               hdcp->connection.is_hdcp2_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+               memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+                      &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+                      sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+               if (msg_out->process.msg1_status ==
+                   TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+                       hdcp->connection.is_km_stored =
+                               msg_out->process.is_km_stored ? 1 : 0;
+                       hdcp->connection.is_repeater =
+                               msg_out->process.is_repeater ? 1 : 0;
+                       status = MOD_HDCP_STATUS_SUCCESS;
+               } else if (msg_out->process.msg1_status ==
+                          TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+                       hdcp->connection.is_hdcp2_revoked = 1;
+                       status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+               }
        }
-
-       return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
-
-       if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+       else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
        else if (!hdcp->connection.is_km_stored &&
-                msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
-
+                  msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
@@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
        if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.lc_init));
+               status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+       else
+               memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.lc_init));
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
@@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
-
-       if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+                       msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
@@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.ske_eks));
-       msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
-
-       if (is_dp_hdcp(hdcp)) {
-               memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
-                      &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
-                      sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.ske_eks,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+               msg_out->prepare.msg1_desc.msg_size =
+                       sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+               if (is_dp_hdcp(hdcp)) {
+                       memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+                              &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+                              sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+               }
        }
+       mutex_unlock(&psp->hdcp_context.mutex);
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
        struct ta_hdcp_shared_memory *hdcp_cmd;
-       struct mod_hdcp_display *display = get_first_added_display(hdcp);
-
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+       struct mod_hdcp_display *display = get_first_active_display(hdcp);
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!display)
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
+       mutex_lock(&psp->hdcp_context.mutex);
+
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
        hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
 
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
-
-       if (!is_dp_mst_hdcp(hdcp)) {
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+       } else if (!is_dp_mst_hdcp(hdcp)) {
                display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
                HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
        }
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
@@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+       mutex_lock(&psp->hdcp_context.mutex);
 
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
-
-       memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
-
-       if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
-               hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
-               hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
-               return MOD_HDCP_STATUS_SUCCESS;
-       } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
-               hdcp->connection.is_hdcp2_revoked = 1;
-               return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+       } else {
+               memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+               if (msg_out->process.msg1_status ==
+                   TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+                       hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+                       hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+                       status = MOD_HDCP_STATUS_SUCCESS;
+               } else if (msg_out->process.msg1_status ==
+                          TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+                       hdcp->connection.is_hdcp2_revoked = 1;
+                       status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+               }
        }
-
-
-       return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        uint8_t i;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
 
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-               if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-                   hdcp->displays[i].adjust.disable)
-                       continue;
+               if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+                               continue;
+
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
 
@@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
                HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
        }
 
-       return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
-                                                                 : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+       if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_SUCCESS;
+       else
+               status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
@@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
-
-       hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
-
-       memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
-              sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+               status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+       } else {
+               hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+               memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+                      &msg_out->prepare.transmitter_message[0],
+                      sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+       }
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
@@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
        struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+       enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
+       mutex_lock(&psp->hdcp_context.mutex);
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
-                              (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
-                      ? MOD_HDCP_STATUS_SUCCESS
-                      : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
-}
-
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status)
-{
-       struct psp_context *psp = hdcp->config.psp.handle;
-       struct ta_hdcp_shared_memory *hdcp_cmd;
-
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-
-       memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
-       hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
-       hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
-       hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
-       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
-       psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-               return MOD_HDCP_STATUS_FAILURE;
-
-       if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
-               if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
-                       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
-               else
-                       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
-       }
+       if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+           msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+               status = MOD_HDCP_STATUS_SUCCESS;
+       else
+               status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
 
-       return MOD_HDCP_STATUS_SUCCESS;
+       mutex_unlock(&psp->hdcp_context.mutex);
+       return status;
 }
+
index dbe7835aabcf747c25825e76dac9d2e047bb9025..0ba3cf7f336a82074fc9a6c4607fea4a9aed86b9 100644 (file)
@@ -83,6 +83,8 @@ struct mod_freesync_config {
        bool btr;
        unsigned int min_refresh_in_uhz;
        unsigned int max_refresh_in_uhz;
+       unsigned int fixed_refresh_in_uhz;
+
 };
 
 struct mod_vrr_params_btr {
@@ -112,6 +114,7 @@ struct mod_vrr_params {
        uint32_t max_duration_in_us;
        uint32_t max_refresh_in_uhz;
        uint32_t min_duration_in_us;
+       uint32_t fixed_refresh_in_uhz;
 
        struct dc_crtc_timing_adjust adjust;
 
index c088602bc1a03cfe413980a84b6386d10b83ec6b..eed560eecbab498dce293105ca1add9add9a5dc9 100644 (file)
@@ -60,7 +60,7 @@ enum mod_hdcp_status {
        MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
        MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
        MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
-       MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+       MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
        MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
@@ -90,7 +90,7 @@ enum mod_hdcp_status {
        MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
        MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
        MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
-       MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+       MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
        MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
        MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
        MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
@@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
 enum mod_hdcp_display_state {
        MOD_HDCP_DISPLAY_INACTIVE = 0,
        MOD_HDCP_DISPLAY_ACTIVE,
-       MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
        MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
 };
 
index fe21179043298c206ac43d85bd5c7c79f039aac8..198c0e64d13a824fdfacb73d40487c89775dea37 100644 (file)
@@ -40,8 +40,9 @@ enum color_transfer_func {
 
 enum vrr_packet_type {
        PACKET_TYPE_VRR,
-       PACKET_TYPE_FS1,
-       PACKET_TYPE_FS2,
+       PACKET_TYPE_FS_V1,
+       PACKET_TYPE_FS_V2,
+       PACKET_TYPE_FS_V3,
        PACKET_TYPE_VTEM
 };
 
index cff3ab15fc0cc5ced207161b57090af2a213bc67..7cd8a43d188962c45d92b05042f59bb4a4d006ec 100644 (file)
@@ -144,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
        }
 
        /*VSC packet set to 2 when DP revision >= 1.2*/
-       if (stream->psr_version != 0)
+       if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
                vsc_packet_revision = vsc_packet_rev2;
 
        /* Update to revision 5 for extended colorimetry support */
index e75a4bb94488ec6fa71febd25d23e5e56853da8c..8c37bcc27132c7576cbb10459ab5a18a1306dbb8 100644 (file)
@@ -24,6 +24,9 @@
 
 #include "power_helpers.h"
 #include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc.h"
+#include "core_types.h"
 
 #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
 
@@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
 }
 
 static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
-               struct iram_table_v_2_2 *table)
+               struct iram_table_v_2_2 *table, bool big_endian)
 {
        unsigned int i;
        unsigned int num_entries = NUM_BL_CURVE_SEGS;
@@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
                lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
                ASSERT(lut_index < params.backlight_lut_array_size);
 
-               table->backlight_thresholds[i] =
-                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
-               table->backlight_offsets[i] =
-                       cpu_to_be16(params.backlight_lut_array[lut_index]);
+               table->backlight_thresholds[i] = (big_endian) ?
+                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
+                       cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
+               table->backlight_offsets[i] = (big_endian) ?
+                       cpu_to_be16(params.backlight_lut_array[lut_index]) :
+                       cpu_to_le16(params.backlight_lut_array[lut_index]);
        }
 }
 
@@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
        ram_table->crgb_slope[7]  = cpu_to_be16(0x1910);
 
        fill_backlight_transform_table_v_2_2(
-                       params, ram_table);
+                       params, ram_table, true);
 }
 
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
 {
        unsigned int i, j;
        unsigned int set = params.set;
 
        ram_table->flags = 0x0;
-
-       ram_table->min_abm_backlight =
-                       cpu_to_be16(params.min_abm_backlight);
+       ram_table->min_abm_backlight = (big_endian) ?
+               cpu_to_be16(params.min_abm_backlight) :
+               cpu_to_le16(params.min_abm_backlight);
 
        for (i = 0; i < NUM_AGGR_LEVEL; i++) {
                ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
@@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
        ram_table->iir_curve[4] = 0x65;
 
        //Gamma 2.2
-       ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
-       ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
-       ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
-       ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
-       ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
-       ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
-       ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
-       ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
-       ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
-       ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
-       ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
-       ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
-       ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
-       ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
-       ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
-       ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
-       ram_table->crgb_slope[0]  = cpu_to_be16(0x3609);
-       ram_table->crgb_slope[1]  = cpu_to_be16(0x2dfa);
-       ram_table->crgb_slope[2]  = cpu_to_be16(0x27ea);
-       ram_table->crgb_slope[3]  = cpu_to_be16(0x235d);
-       ram_table->crgb_slope[4]  = cpu_to_be16(0x2042);
-       ram_table->crgb_slope[5]  = cpu_to_be16(0x1dc3);
-       ram_table->crgb_slope[6]  = cpu_to_be16(0x1b1a);
-       ram_table->crgb_slope[7]  = cpu_to_be16(0x1910);
+       ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
+       ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
+       ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
+       ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
+       ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
+       ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
+       ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
+       ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
+       ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
+       ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
+       ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
+       ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
+       ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
+       ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
+       ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
+       ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
+       ram_table->crgb_slope[0]  = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
+       ram_table->crgb_slope[1]  = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
+       ram_table->crgb_slope[2]  = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
+       ram_table->crgb_slope[3]  = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
+       ram_table->crgb_slope[4]  = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
+       ram_table->crgb_slope[5]  = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
+       ram_table->crgb_slope[6]  = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
+       ram_table->crgb_slope[7]  = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
 
        fill_backlight_transform_table_v_2_2(
-                       params, ram_table);
+                       params, ram_table, big_endian);
+}
+
+bool dmub_init_abm_config(struct abm *abm,
+       struct dmcu_iram_parameters params)
+{
+       unsigned char ram_table[IRAM_SIZE];
+       bool result = false;
+
+       if (abm == NULL)
+               return false;
+
+       memset(&ram_table, 0, sizeof(ram_table));
+
+       fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
+       result = abm->funcs->init_abm_config(
+               abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+
+       return result;
 }
 
 bool dmcu_load_iram(struct dmcu *dmcu,
@@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu,
        if (dmcu == NULL)
                return false;
 
-       if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+       if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
                return true;
 
        memset(&ram_table, 0, sizeof(ram_table));
 
        if (dmcu->dmcu_version.abm_version == 0x24) {
-               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
-               result = dmcu->funcs->load_iram(
-                               dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
+                       result = dmcu->funcs->load_iram(
+                                       dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
        } else if (dmcu->dmcu_version.abm_version == 0x23) {
-               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+               fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
 
                result = dmcu->funcs->load_iram(
                                dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
index e541570263308af5c504748db2f401e7106c6919..46fbca2e2cd1c850a68e62c1e87fdaa844b2cf40 100644 (file)
@@ -26,6 +26,7 @@
 #define MODULES_POWER_POWER_HELPERS_H_
 
 #include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
 
 
 enum abm_defines {
@@ -44,5 +45,7 @@ struct dmcu_iram_parameters {
 
 bool dmcu_load_iram(struct dmcu *dmcu,
                struct dmcu_iram_parameters params);
+bool dmub_init_abm_config(struct abm *abm,
+               struct dmcu_iram_parameters params);
 
 #endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
deleted file mode 100644 (file)
index 03121ca..0000000
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "mod_stats.h"
-#include "dm_services.h"
-#include "dc.h"
-#include "core_types.h"
-
-#define DAL_STATS_ENABLE_REGKEY                        "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000000
-#define DAL_STATS_ENABLE_REGKEY_ENABLED                0x00000001
-
-#define DAL_STATS_ENTRIES_REGKEY               "DalStatsEntries"
-#define DAL_STATS_ENTRIES_REGKEY_DEFAULT       0x00350000
-#define DAL_STATS_ENTRIES_REGKEY_MAX           0x01000000
-
-#define DAL_STATS_EVENT_ENTRIES_DEFAULT                0x00000100
-
-#define MOD_STATS_NUM_VSYNCS                   5
-#define MOD_STATS_EVENT_STRING_MAX             512
-
-struct stats_time_cache {
-       unsigned int entry_id;
-
-       unsigned long flip_timestamp_in_ns;
-       unsigned long vupdate_timestamp_in_ns;
-
-       unsigned int render_time_in_us;
-       unsigned int avg_render_time_in_us_last_ten;
-       unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
-       unsigned int num_vsync_between_flips;
-
-       unsigned int flip_to_vsync_time_in_us;
-       unsigned int vsync_to_flip_time_in_us;
-
-       unsigned int min_window;
-       unsigned int max_window;
-       unsigned int v_total_min;
-       unsigned int v_total_max;
-       unsigned int event_triggers;
-
-       unsigned int lfc_mid_point_in_us;
-       unsigned int num_frames_inserted;
-       unsigned int inserted_duration_in_us;
-
-       unsigned int flags;
-};
-
-struct stats_event_cache {
-       unsigned int entry_id;
-       char event_string[MOD_STATS_EVENT_STRING_MAX];
-};
-
-struct core_stats {
-       struct mod_stats public;
-       struct dc *dc;
-
-       bool enabled;
-       unsigned int entries;
-       unsigned int event_entries;
-       unsigned int entry_id;
-
-       struct stats_time_cache *time;
-       unsigned int index;
-
-       struct stats_event_cache *events;
-       unsigned int event_index;
-
-};
-
-#define MOD_STATS_TO_CORE(mod_stats)\
-               container_of(mod_stats, struct core_stats, public)
-
-bool mod_stats_init(struct mod_stats *mod_stats)
-{
-       bool result = false;
-       struct core_stats *core_stats = NULL;
-       struct dc *dc = NULL;
-
-       if (mod_stats == NULL)
-               return false;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-       dc = core_stats->dc;
-
-       return result;
-}
-
-struct mod_stats *mod_stats_create(struct dc *dc)
-{
-       struct core_stats *core_stats = NULL;
-       struct persistent_data_flag flag;
-       unsigned int reg_data;
-       int i = 0;
-
-       if (dc == NULL)
-               goto fail_construct;
-
-       core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
-
-       if (core_stats == NULL)
-               goto fail_construct;
-
-       core_stats->dc = dc;
-
-       core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
-       if (dm_read_persistent_data(dc->ctx, NULL, NULL,
-                       DAL_STATS_ENABLE_REGKEY,
-                       &reg_data, sizeof(unsigned int), &flag))
-               core_stats->enabled = reg_data;
-
-       if (core_stats->enabled) {
-               core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
-               if (dm_read_persistent_data(dc->ctx, NULL, NULL,
-                               DAL_STATS_ENTRIES_REGKEY,
-                               &reg_data, sizeof(unsigned int), &flag)) {
-                       if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
-                               core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
-                       else
-                               core_stats->entries = reg_data;
-               }
-               core_stats->time = kcalloc(core_stats->entries,
-                                               sizeof(struct stats_time_cache),
-                                               GFP_KERNEL);
-
-               if (core_stats->time == NULL)
-                       goto fail_construct_time;
-
-               core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
-               core_stats->events = kcalloc(core_stats->event_entries,
-                                            sizeof(struct stats_event_cache),
-                                            GFP_KERNEL);
-
-               if (core_stats->events == NULL)
-                       goto fail_construct_events;
-
-       } else {
-               core_stats->entries = 0;
-       }
-
-       /* Purposely leave index 0 unused so we don't need special logic to
-        * handle calculation cases that depend on previous flip data.
-        */
-       core_stats->index = 1;
-       core_stats->event_index = 0;
-
-       // Keeps track of ordering within the different stats structures
-       core_stats->entry_id = 0;
-
-       return &core_stats->public;
-
-fail_construct_events:
-       kfree(core_stats->time);
-
-fail_construct_time:
-       kfree(core_stats);
-
-fail_construct:
-       return NULL;
-}
-
-void mod_stats_destroy(struct mod_stats *mod_stats)
-{
-       if (mod_stats != NULL) {
-               struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-               kfree(core_stats->time);
-               kfree(core_stats->events);
-               kfree(core_stats);
-       }
-}
-
-void mod_stats_dump(struct mod_stats *mod_stats)
-{
-       struct dc  *dc = NULL;
-       struct dal_logger *logger = NULL;
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       struct stats_event_cache *events = NULL;
-       unsigned int time_index = 1;
-       unsigned int event_index = 0;
-       unsigned int index = 0;
-       struct log_entry log_entry;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-       dc = core_stats->dc;
-       logger = dc->ctx->logger;
-       time = core_stats->time;
-       events = core_stats->events;
-
-       DISPLAY_STATS_BEGIN(log_entry);
-
-       DISPLAY_STATS("==Display Caps==\n");
-
-       DISPLAY_STATS("==Display Stats==\n");
-
-       DISPLAY_STATS("%10s %10s %10s %10s %10s"
-                       " %11s %11s %17s %10s %14s"
-                       " %10s %10s %10s %10s %10s"
-                       " %10s %10s %10s %10s\n",
-               "render", "avgRender",
-               "minWindow", "midPoint", "maxWindow",
-               "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
-               "numFrame", "insertDuration",
-               "vTotalMin", "vTotalMax", "eventTrigs",
-               "vSyncTime1", "vSyncTime2", "vSyncTime3",
-               "vSyncTime4", "vSyncTime5", "flags");
-
-       for (int i = 0; i < core_stats->entry_id; i++) {
-               if (event_index < core_stats->event_index &&
-                               i == events[event_index].entry_id) {
-                       DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
-                       event_index++;
-               } else if (time_index < core_stats->index &&
-                               i == time[time_index].entry_id) {
-                       DISPLAY_STATS("%10u %10u %10u %10u %10u"
-                                       " %11u %11u %17u %10u %14u"
-                                       " %10u %10u %10u %10u %10u"
-                                       " %10u %10u %10u %10u\n",
-                               time[time_index].render_time_in_us,
-                               time[time_index].avg_render_time_in_us_last_ten,
-                               time[time_index].min_window,
-                               time[time_index].lfc_mid_point_in_us,
-                               time[time_index].max_window,
-                               time[time_index].vsync_to_flip_time_in_us,
-                               time[time_index].flip_to_vsync_time_in_us,
-                               time[time_index].num_vsync_between_flips,
-                               time[time_index].num_frames_inserted,
-                               time[time_index].inserted_duration_in_us,
-                               time[time_index].v_total_min,
-                               time[time_index].v_total_max,
-                               time[time_index].event_triggers,
-                               time[time_index].v_sync_time_in_us[0],
-                               time[time_index].v_sync_time_in_us[1],
-                               time[time_index].v_sync_time_in_us[2],
-                               time[time_index].v_sync_time_in_us[3],
-                               time[time_index].v_sync_time_in_us[4],
-                               time[time_index].flags);
-
-                       time_index++;
-               }
-       }
-
-       DISPLAY_STATS_END(log_entry);
-}
-
-void mod_stats_reset_data(struct mod_stats *mod_stats)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       memset(core_stats->time, 0,
-               sizeof(struct stats_time_cache) * core_stats->entries);
-
-       memset(core_stats->events, 0,
-               sizeof(struct stats_event_cache) * core_stats->event_entries);
-
-       core_stats->index = 1;
-       core_stats->event_index = 0;
-
-       // Keeps track of ordering within the different stats structures
-       core_stats->entry_id = 0;
-}
-
-void mod_stats_update_event(struct mod_stats *mod_stats,
-               char *event_string,
-               unsigned int length)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_event_cache *events = NULL;
-       unsigned int index = 0;
-       unsigned int copy_length = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->event_index >= core_stats->event_entries)
-               return;
-
-       events = core_stats->events;
-       index = core_stats->event_index;
-
-       copy_length = length;
-       if (length > MOD_STATS_EVENT_STRING_MAX)
-               copy_length = MOD_STATS_EVENT_STRING_MAX;
-
-       memcpy(&events[index].event_string, event_string, copy_length);
-       events[index].event_string[copy_length - 1] = '\0';
-
-       events[index].entry_id = core_stats->entry_id;
-       core_stats->event_index++;
-       core_stats->entry_id++;
-}
-
-void mod_stats_update_flip(struct mod_stats *mod_stats,
-               unsigned long timestamp_in_ns)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-
-       time[index].flip_timestamp_in_ns = timestamp_in_ns;
-       time[index].render_time_in_us =
-               (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
-
-       if (index >= 10) {
-               for (unsigned int i = 0; i < 10; i++)
-                       time[index].avg_render_time_in_us_last_ten +=
-                                       time[index - i].render_time_in_us;
-               time[index].avg_render_time_in_us_last_ten /= 10;
-       }
-
-       if (time[index].num_vsync_between_flips > 0)
-               time[index].vsync_to_flip_time_in_us =
-                       (timestamp_in_ns -
-                               time[index].vupdate_timestamp_in_ns) / 1000;
-       else
-               time[index].vsync_to_flip_time_in_us =
-                       (timestamp_in_ns -
-                               time[index - 1].vupdate_timestamp_in_ns) / 1000;
-
-       time[index].entry_id = core_stats->entry_id;
-       core_stats->index++;
-       core_stats->entry_id++;
-}
-
-void mod_stats_update_vupdate(struct mod_stats *mod_stats,
-               unsigned long timestamp_in_ns)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-       unsigned int num_vsyncs = 0;
-       unsigned int prev_vsync_in_ns = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-       num_vsyncs = time[index].num_vsync_between_flips;
-
-       if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
-               if (num_vsyncs == 0) {
-                       prev_vsync_in_ns =
-                               time[index - 1].vupdate_timestamp_in_ns;
-
-                       time[index].flip_to_vsync_time_in_us =
-                               (timestamp_in_ns -
-                                       time[index - 1].flip_timestamp_in_ns) /
-                                       1000;
-               } else {
-                       prev_vsync_in_ns =
-                               time[index].vupdate_timestamp_in_ns;
-               }
-
-               time[index].v_sync_time_in_us[num_vsyncs] =
-                       (timestamp_in_ns - prev_vsync_in_ns) / 1000;
-       }
-
-       time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
-       time[index].num_vsync_between_flips++;
-}
-
-void mod_stats_update_freesync(struct mod_stats *mod_stats,
-               unsigned int v_total_min,
-               unsigned int v_total_max,
-               unsigned int event_triggers,
-               unsigned int window_min,
-               unsigned int window_max,
-               unsigned int lfc_mid_point_in_us,
-               unsigned int inserted_frames,
-               unsigned int inserted_duration_in_us)
-{
-       struct core_stats *core_stats = NULL;
-       struct stats_time_cache *time = NULL;
-       unsigned int index = 0;
-
-       if (mod_stats == NULL)
-               return;
-
-       core_stats = MOD_STATS_TO_CORE(mod_stats);
-
-       if (core_stats->index >= core_stats->entries)
-               return;
-
-       time = core_stats->time;
-       index = core_stats->index;
-
-       time[index].v_total_min = v_total_min;
-       time[index].v_total_max = v_total_max;
-       time[index].event_triggers = event_triggers;
-       time[index].min_window = window_min;
-       time[index].max_window = window_max;
-       time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
-       time[index].num_frames_inserted = inserted_frames;
-       time[index].inserted_duration_in_us = inserted_duration_in_us;
-}
-
index 00f132f8ad55dbfa2720878cb4f0cd590d54a697..61ee4be35d27d87230a3c710f7b0ff32a9fe75ab 100644 (file)
@@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
                        evict_vmids(core_vmid);
 
                vmid = get_next_available_vmid(core_vmid);
-               add_ptb_to_table(core_vmid, vmid, ptb);
+               if (vmid != -1) {
+                       add_ptb_to_table(core_vmid, vmid, ptb);
 
-               dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+                       dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+               } else
+                       ASSERT(0);
        }
 
        return vmid;
index d655a76bedc6c3da640fce66d6a8b45876e846b4..e98c84ef206fec4a4a54c21d8a18fc1d0480e454 100644 (file)
@@ -40,6 +40,13 @@ enum amd_chip_flags {
        AMD_EXP_HW_SUPPORT = 0x00080000UL,
 };
 
+enum amd_apu_flags {
+       AMD_APU_IS_RAVEN = 0x00000001UL,
+       AMD_APU_IS_RAVEN2 = 0x00000002UL,
+       AMD_APU_IS_PICASSO = 0x00000004UL,
+       AMD_APU_IS_RENOIR = 0x00000008UL,
+};
+
 enum amd_ip_block_type {
        AMD_IP_BLOCK_TYPE_COMMON,
        AMD_IP_BLOCK_TYPE_GMC,
@@ -150,6 +157,13 @@ enum DC_FEATURE_MASK {
        DC_PSR_MASK = 0x8,
 };
 
+enum DC_DEBUG_MASK {
+       DC_DISABLE_PIPE_SPLIT = 0x1,
+       DC_DISABLE_STUTTER = 0x2,
+       DC_DISABLE_DSC = 0x4,
+       DC_DISABLE_CLOCK_GATING = 0x8
+};
+
 enum amd_dpm_forced_level;
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
index e7db6f9f9c865f017b7b7e4e447c74c915b3dabd..8b0b9a2a8fed662b590209b72d1c22d43439fb98 100644 (file)
 #define GRBM_PWR_CNTL__ALL_REQ_EN_MASK                                                                        0x00008000L
 //GRBM_STATUS
 #define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT                                                            0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT                                                                   0x5
 #define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT                                                            0x7
 #define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT                                                            0x8
 #define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT                                                                0x9
 #define GRBM_STATUS__CB_BUSY__SHIFT                                                                           0x1e
 #define GRBM_STATUS__GUI_ACTIVE__SHIFT                                                                        0x1f
 #define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK                                                              0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK                                                                     0x00000020L
 #define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK                                                              0x00000080L
 #define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK                                                              0x00000100L
 #define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK                                                                  0x00000200L
 #define GRBM_READ_ERROR__READ_ERROR_MASK                                                                      0x80000000L
 //GRBM_READ_ERROR2
 #define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT                                                           0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT                                                          0x11
 #define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT                                                           0x12
 #define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT                                                       0x13
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT                                                   0x14
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT                                                      0x1e
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT                                                      0x1f
 #define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK                                                             0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK                                                            0x00020000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK                                                             0x00040000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK                                                         0x00080000L
 #define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK                                                     0x00100000L
index 68d0ffad28c7dd6716e02a2c1a9b79c452e8fc62..92fd27c26a77b1e16605d99669d1b2e2b3c827d8 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  0
 #define mmRCC_CONFIG_RESERVED                                                                          0x0de4 // duplicate 
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x0de5 // duplicate 
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             0
+#endif
 
 
 // addressBlock: syshub_mmreg_ind_syshubdec
index 435462294fbc514b343dd709c3d5cd55cff7034f..a7cd760ebf8f14edaa72462245d35ddee8718b90 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
index ce5830ebe095ab6635971041fcc16e8ca4fbe4ba..0c5a08bc034a6b16422da9925c6273184a8a5040 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
new file mode 100644 (file)
index 0000000..e87c359
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_OFFSET_HEADER
+#define _pwr_10_0_OFFSET_HEADER
+
+#define mmPWR_MISC_CNTL_STATUS                                                                         0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                                                0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
new file mode 100644 (file)
index 0000000..8a000c2
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_SH_MASK_HEADER
+#define _pwr_10_0_SH_MASK_HEADER
+
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT                                                      0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT                                                        0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK                                                        0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK                                                          0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
new file mode 100644 (file)
index 0000000..9bf7328
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_OFFSET_HEADER
+#define _smuio_12_0_0_OFFSET_HEADER
+
+#define mmSMUIO_GFX_MISC_CNTL                                                                          0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                                                                 0
+
+#define mmPWR_MISC_CNTL_STATUS                                                                         0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                                                1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
new file mode 100644 (file)
index 0000000..26556fa
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_SH_MASK_HEADER
+#define _smuio_12_0_0_SH_MASK_HEADER
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK                                                           0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT                                                         0x1
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT                                                      0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT                                                        0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK                                                        0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK                                                          0x00000006L
+
+#endif
index 70146518174cd865acfd23fbbe6ec813ddb579e9..b36ea8340afa604eace3a1354862ff8a428879c4 100644 (file)
@@ -972,11 +972,13 @@ struct atom_ext_display_path
 };
 
 //usCaps
-enum ext_display_path_cap_def
-{
-  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE               =0x0001,
-  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN             =0x0002,
-  EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK              =0x007C,           
+enum ext_display_path_cap_def {
+       EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =           0x0001,
+       EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =         0x0002,
+       EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =          0x007C,
+       EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 =      (0x01 << 2), //PI redriver chip
+       EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
+       EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 =    (0x03 << 2)  //Parade DP->HDMI recoverter chip
 };
 
 struct atom_external_display_connection_info
@@ -1876,6 +1878,108 @@ struct atom_smc_dpm_info_v4_6
   uint32_t   boardreserved[10];
 };
 
+struct atom_smc_dpm_info_v4_7
+{
+  struct   atom_common_table_header  table_header;
+    // SECTION: BOARD PARAMETERS
+    // I2C Control
+  struct smudpm_i2c_controller_config_v2  I2cControllers[8];
+
+  // SVI2 Board Parameters
+  uint16_t     MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+  uint16_t     MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+  uint8_t      VddGfxVrMapping;   // Use VR_MAPPING* bitfields
+  uint8_t      VddSocVrMapping;   // Use VR_MAPPING* bitfields
+  uint8_t      VddMem0VrMapping;  // Use VR_MAPPING* bitfields
+  uint8_t      VddMem1VrMapping;  // Use VR_MAPPING* bitfields
+
+  uint8_t      GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+  uint8_t      SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+  uint8_t      ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+  uint8_t      Padding8_V;
+
+  // Telemetry Settings
+  uint16_t     GfxMaxCurrent;   // in Amps
+  uint8_t      GfxOffset;       // in Amps
+  uint8_t      Padding_TelemetryGfx;
+  uint16_t     SocMaxCurrent;   // in Amps
+  uint8_t      SocOffset;       // in Amps
+  uint8_t      Padding_TelemetrySoc;
+
+  uint16_t     Mem0MaxCurrent;   // in Amps
+  uint8_t      Mem0Offset;       // in Amps
+  uint8_t      Padding_TelemetryMem0;
+
+  uint16_t     Mem1MaxCurrent;   // in Amps
+  uint8_t      Mem1Offset;       // in Amps
+  uint8_t      Padding_TelemetryMem1;
+
+  // GPIO Settings
+  uint8_t      AcDcGpio;        // GPIO pin configured for AC/DC switching
+  uint8_t      AcDcPolarity;    // GPIO polarity for AC/DC switching
+  uint8_t      VR0HotGpio;      // GPIO pin configured for VR0 HOT event
+  uint8_t      VR0HotPolarity;  // GPIO polarity for VR0 HOT event
+
+  uint8_t      VR1HotGpio;      // GPIO pin configured for VR1 HOT event
+  uint8_t      VR1HotPolarity;  // GPIO polarity for VR1 HOT event
+  uint8_t      GthrGpio;        // GPIO pin configured for GTHR Event
+  uint8_t      GthrPolarity;    // replace GPIO polarity for GTHR
+
+  // LED Display Settings
+  uint8_t      LedPin0;         // GPIO number for LedPin[0]
+  uint8_t      LedPin1;         // GPIO number for LedPin[1]
+  uint8_t      LedPin2;         // GPIO number for LedPin[2]
+  uint8_t      padding8_4;
+
+  // GFXCLK PLL Spread Spectrum
+  uint8_t      PllGfxclkSpreadEnabled;   // on or off
+  uint8_t      PllGfxclkSpreadPercent;   // Q4.4
+  uint16_t     PllGfxclkSpreadFreq;      // kHz
+
+  // GFXCLK DFLL Spread Spectrum
+  uint8_t      DfllGfxclkSpreadEnabled;   // on or off
+  uint8_t      DfllGfxclkSpreadPercent;   // Q4.4
+  uint16_t     DfllGfxclkSpreadFreq;      // kHz
+
+  // UCLK Spread Spectrum
+  uint8_t      UclkSpreadEnabled;   // on or off
+  uint8_t      UclkSpreadPercent;   // Q4.4
+  uint16_t     UclkSpreadFreq;      // kHz
+
+  // SOCCLK Spread Spectrum
+  uint8_t      SoclkSpreadEnabled;   // on or off
+  uint8_t      SocclkSpreadPercent;   // Q4.4
+  uint16_t     SocclkSpreadFreq;      // kHz
+
+  // Total board power
+  uint16_t     TotalBoardPower;     //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+  uint16_t     BoardPadding;
+
+  // Mvdd Svi2 Div Ratio Setting
+  uint32_t     MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
+
+  // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+  uint8_t      GpioI2cScl;          // Serial Clock
+  uint8_t      GpioI2cSda;          // Serial Data
+  uint16_t     GpioPadding;
+
+  // Additional LED Display Settings
+  uint8_t      LedPin3;         // GPIO number for LedPin[3] - PCIE GEN Speed
+  uint8_t      LedPin4;         // GPIO number for LedPin[4] - PMFW Error Status
+  uint16_t     LedEnableMask;
+
+  // Power Limit Scalars
+  uint8_t      PowerLimitScalar[4];    //[PPT_THROTTLER_COUNT]
+
+  uint8_t      MvddUlvPhaseSheddingMask;
+  uint8_t      VddciUlvPhaseSheddingMask;
+  uint8_t      Padding8_Psi1;
+  uint8_t      Padding8_Psi2;
+
+  uint32_t     BoardReserved[5];
+};
+
 /* 
   ***************************************************************************
     Data Table asic_profiling_info  structure
index a69deb3a2ac07a8b3e0db851dc0ccdec6b00d351..60a6536ff656d70a4ade58c3a84c57798157415c 100644 (file)
@@ -32,7 +32,6 @@ struct cgs_device;
  * enum cgs_ind_reg - Indirect register spaces
  */
 enum cgs_ind_reg {
-       CGS_IND_REG__MMIO,
        CGS_IND_REG__PCIE,
        CGS_IND_REG__SMC,
        CGS_IND_REG__UVD_CTX,
index 8e2acb4df860b800474efd8f5fce1cdc5a1f7ca4..7e6dcdf7df73a83d37da79af18fe516205d84a57 100644 (file)
@@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
        hwmgr->not_vf = !amdgpu_sriov_vf(adev);
        hwmgr->device = amdgpu_cgs_create_device(adev);
        mutex_init(&hwmgr->smu_lock);
+       mutex_init(&hwmgr->msg_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
        hwmgr->feature_mask = adev->pm.pp_feature;
@@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
 {
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 
+       mutex_destroy(&hwmgr->msg_lock);
+
        kfree(hwmgr->hardcode_pp_table);
        hwmgr->hardcode_pp_table = NULL;
 
index e77046931e4c6beb3c908fe2040496d73f3abba1..8c684a6e0156547df01b48abfacffee58b76a503 100644 (file)
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
 
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        size_t size = 0;
        int ret = 0, i = 0;
        uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
        uint32_t sort_feature[SMU_FEATURE_COUNT];
        uint64_t hw_feature_count = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
        uint32_t feature_low = 0, feature_high = 0;
        int ret = 0;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        feature_low = (feature_mask >> 0 ) & 0xffffffff;
        feature_high = (feature_mask >> 32) & 0xffffffff;
 
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        mutex_lock(&smu->mutex);
 
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
        if (!if_version && !smu_version)
                return -EINVAL;
 
+       if (smu->smc_fw_if_version && smu->smc_fw_version)
+       {
+               if (if_version)
+                       *if_version = smu->smc_fw_if_version;
+
+               if (smu_version)
+                       *smu_version = smu->smc_fw_version;
+
+               return 0;
+       }
+
        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;
+
+               smu->smc_fw_if_version = *if_version;
        }
 
        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;
+
+               smu->smc_fw_version = *smu_version;
        }
 
        return ret;
@@ -327,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
-                                         param, &param);
+                                         param, value);
        if (ret)
                return ret;
 
        /* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
         * now, we un-support it */
-       *value = param & 0x7fffffff;
+       *value = *value & 0x7fffffff;
 
        return ret;
 }
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -511,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
        int table_id = smu_table_get_index(smu, table_index);
        uint32_t table_size;
        int ret = 0;
-
        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;
 
@@ -547,12 +570,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_VEGA20)
                return (amdgpu_dpm == 2) ? true : false;
        else if (adev->asic_type >= CHIP_ARCTURUS) {
-               if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
-                       return false;
-               else
+             if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
                        return true;
-       } else
-               return false;
+       }
+       return false;
 }
 
 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
@@ -569,8 +590,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
        uint32_t powerplay_table_size;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;
 
@@ -591,11 +616,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;
 
-       if (!smu->pm_enabled)
+       if (!adev->pm.dpm_enabled)
                return -EINVAL;
+
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched !\n");
                return -EIO;
@@ -636,8 +663,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
-       if (!smu->pm_enabled)
-               return ret;
        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
@@ -665,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 
        if (smu->is_apu)
                return 1;
-
        feature_id = smu_feature_get_index(smu, mask);
        if (feature_id < 0)
                return 0;
@@ -932,13 +956,6 @@ static int smu_sw_init(void *handle)
                return ret;
        }
 
-       if (adev->smu.ppt_funcs->i2c_eeprom_init) {
-               ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -948,9 +965,6 @@ static int smu_sw_fini(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
-       if (adev->smu.ppt_funcs->i2c_eeprom_fini)
-               smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
        kfree(smu->irq_source);
        smu->irq_source = NULL;
 
@@ -1323,6 +1337,9 @@ static int smu_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
+       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+               return 0;
+
        ret = smu_start_smc_engine(smu);
        if (ret) {
                pr_err("SMU is not ready yet!\n");
@@ -1336,9 +1353,6 @@ static int smu_hw_init(void *handle)
                smu_set_gfx_cgpg(&adev->smu, true);
        }
 
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        if (!smu->pm_enabled)
                return 0;
 
@@ -1366,10 +1380,11 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       if (!smu->pm_enabled)
-               adev->pm.dpm_enabled = false;
-       else
-               adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+       ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+       if (ret)
+               goto failed;
+
+       adev->pm.dpm_enabled = true;
 
        pr_info("SMU is initialized successfully!\n");
 
@@ -1381,6 +1396,9 @@ failed:
 
 static int smu_stop_dpms(struct smu_context *smu)
 {
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        return smu_system_features_control(smu, false);
 }
 
@@ -1403,6 +1421,10 @@ static int smu_hw_fini(void *handle)
        if (!smu->pm_enabled)
                return 0;
 
+       adev->pm.dpm_enabled = false;
+
+       smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
        if (!amdgpu_sriov_vf(adev)){
                ret = smu_stop_thermal_control(smu);
                if (ret) {
@@ -1542,6 +1564,10 @@ static int smu_suspend(void *handle)
        if (!smu->pm_enabled)
                return 0;
 
+       adev->pm.dpm_enabled = false;
+
+       smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
        if(!amdgpu_sriov_vf(adev)) {
                ret = smu_disable_dpm(smu);
                if (ret)
@@ -1587,11 +1613,17 @@ static int smu_resume(void *handle)
        if (ret)
                goto failed;
 
+       ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+       if (ret)
+               goto failed;
+
        if (smu->is_apu)
                smu_set_gfx_cgpg(&adev->smu, true);
 
        smu->disable_uclk_switch = 0;
 
+       adev->pm.dpm_enabled = true;
+
        pr_info("SMU is resumed successfully!\n");
 
        return 0;
@@ -1603,10 +1635,14 @@ failed:
 int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
 {
+       struct amdgpu_device *adev = smu->adev;
        int index = 0;
        int num_of_active_display = 0;
 
-       if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
+       if (!is_support_sw_smu(smu->adev))
                return -EINVAL;
 
        if (!display_config)
@@ -1668,12 +1704,16 @@ int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
 {
        struct amd_pp_simple_clock_info simple_clocks = {0};
+       struct amdgpu_device *adev = smu->adev;
        struct smu_clock_info hw_clocks;
        int ret = 0;
 
        if (!is_support_sw_smu(smu->adev))
                return -EINVAL;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1776,7 @@ static int smu_enable_umd_pstate(void *handle,
        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-       if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+       if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1778,9 +1818,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-       if (!smu->pm_enabled)
-               return -EINVAL;
-
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
@@ -1831,8 +1868,12 @@ int smu_handle_task(struct smu_context *smu,
                    enum amd_pp_task task_id,
                    bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (lock_needed)
                mutex_lock(&smu->mutex);
 
@@ -1866,10 +1907,11 @@ int smu_switch_power_profile(struct smu_context *smu,
                             bool en)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        long workload;
        uint32_t index;
 
-       if (!smu->pm_enabled)
+       if (!adev->pm.dpm_enabled)
                return -EINVAL;
 
        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1942,12 @@ int smu_switch_power_profile(struct smu_context *smu,
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        enum amd_dpm_forced_level level;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
@@ -1915,8 +1961,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
@@ -1939,8 +1989,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
 
 int smu_set_display_count(struct smu_context *smu, uint32_t count)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);
@@ -1954,8 +2008,12 @@ int smu_force_clk_levels(struct smu_context *smu,
                         bool lock_needed)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                pr_debug("force clock level is for dpm manual mode only.\n");
                return -EINVAL;
@@ -1973,20 +2031,19 @@ int smu_force_clk_levels(struct smu_context *smu,
        return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ * However, the mp1 state setting should still be granted
+ * even if the dpm_enabled cleared.
+ */
 int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state)
 {
        uint16_t msg;
        int ret;
 
-       /*
-        * The SMC is not fully ready. That may be
-        * expected as the IP may be masked.
-        * So, just return without error.
-        */
-       if (!smu->pm_enabled)
-               return 0;
-
        mutex_lock(&smu->mutex);
 
        switch (mp1_state) {
@@ -2023,15 +2080,11 @@ int smu_set_mp1_state(struct smu_context *smu,
 int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       /*
-        * The SMC is not fully ready. That may be
-        * expected as the IP may be masked.
-        * So, just return without error.
-        */
-       if (!smu->pm_enabled)
-               return 0;
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                return 0;
@@ -2047,6 +2100,28 @@ int smu_set_df_cstate(struct smu_context *smu,
        return ret;
 }
 
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
+       if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+               return 0;
+
+       mutex_lock(&smu->mutex);
+
+       ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
+       if (ret)
+               pr_err("[AllowXgmiPowerDown] failed!\n");
+
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
+
 int smu_write_watermarks_table(struct smu_context *smu)
 {
        void *watermarks_table = smu->smu_table.watermarks_table;
@@ -2065,6 +2140,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
        void *table = smu->smu_table.watermarks_table;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        if (!table)
                return -EINVAL;
@@ -2089,8 +2168,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
 int smu_set_ac_dc(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        /* controlled by firmware */
        if (smu->dc_controlled_by_gpio)
                return 0;
@@ -2149,8 +2232,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
 
 int smu_load_microcode(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2250,12 @@ int smu_load_microcode(struct smu_context *smu)
 
 int smu_check_fw_status(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2282,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2303,15 @@ int smu_get_power_limit(struct smu_context *smu,
                        bool def,
                        bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (lock_needed)
+       if (lock_needed) {
+               if (!adev->pm.dpm_enabled)
+                       return -EINVAL;
+
                mutex_lock(&smu->mutex);
+       }
 
        if (smu->ppt_funcs->get_power_limit)
                ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2324,12 @@ int smu_get_power_limit(struct smu_context *smu,
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2342,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2360,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
 
 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2378,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 
 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2398,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
                          enum PP_OD_DPM_TABLE_COMMAND type,
                          long *input, uint32_t size)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
                    enum amd_pp_sensors sensor,
                    void *data, uint32_t *size)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2436,12 @@ int smu_read_sensor(struct smu_context *smu,
 
 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2457,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
                               uint32_t param_size,
                               bool lock_needed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (lock_needed)
                mutex_lock(&smu->mutex);
 
@@ -2346,8 +2478,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
 
 int smu_get_fan_control_mode(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2496,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
 
 int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2514,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2532,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2550,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2568,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2586,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 
 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        if (smu->ppt_funcs->set_active_display_count)
                ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
@@ -2442,8 +2602,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
                          struct amd_pp_clocks *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2621,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
 int smu_get_max_high_clocks(struct smu_context *smu,
                            struct amd_pp_simple_clock_info *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2641,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                       enum smu_clk_type clk_type,
                                       struct pp_clock_levels_with_latency *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2661,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                       enum amd_pp_clock_type type,
                                       struct pp_clock_levels_with_voltage *clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2681,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
 int smu_display_clock_voltage_request(struct smu_context *smu,
                                      struct pp_display_clock_request *clock_req)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2700,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 
 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = -EINVAL;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2718,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
 
 int smu_notify_smu_enable_pwe(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2737,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
 int smu_set_xgmi_pstate(struct smu_context *smu,
                        uint32_t pstate)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2755,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
 
 int smu_set_azalia_d3_pme(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2771,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
        return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ *
+ * However, the baco/mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
 bool smu_baco_is_support(struct smu_context *smu)
 {
        bool ret = false;
@@ -2646,8 +2850,12 @@ int smu_mode2_reset(struct smu_context *smu)
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2870,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
                            unsigned int *clock_values_in_khz,
                            unsigned int *num_states)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2889,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 {
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
 
        mutex_lock(&smu->mutex);
 
@@ -2691,8 +2907,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 int smu_get_dpm_clock_table(struct smu_context *smu,
                            struct dpm_clocks *clock_table)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
+       if (!adev->pm.dpm_enabled)
+               return -EINVAL;
+
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->get_dpm_clock_table)
index 1ef0923f7190676d26efcfb882a730d7c7b0e13b..27c5fc9572b27bba60334706e3ab59b83bd00425 100644 (file)
@@ -128,6 +128,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetXgmiMode,                         PPSMC_MSG_SetXgmiMode),
        MSG_MAP(SetMemoryChannelEnable,              PPSMC_MSG_SetMemoryChannelEnable),
        MSG_MAP(DFCstateControl,                     PPSMC_MSG_DFCstateControl),
+       MSG_MAP(GmiPwrDnControl,                     PPSMC_MSG_GmiPwrDnControl),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -622,6 +623,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
        struct arcturus_dpm_table *dpm_table = NULL;
 
+       if (amdgpu_ras_intr_triggered())
+               return snprintf(buf, PAGE_SIZE, "unavailable\n");
+
        dpm_table = smu_dpm->dpm_context;
 
        switch (type) {
@@ -997,6 +1001,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
        PPTable_t *pptable = table_context->driver_pptable;
        int ret = 0;
 
+       if (amdgpu_ras_intr_triggered())
+               return 0;
+
        if (!data || !size)
                return -EINVAL;
 
@@ -2226,12 +2233,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
 static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct smu_context *smu = &adev->smu;
        int res;
 
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2250,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 
 static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->pm_enabled)
-               return;
-
        i2c_del_adapter(control);
 }
 
@@ -2261,7 +2258,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (!smu_v11_0_baco_is_support(smu))
+       if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
                return false;
 
        val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -2296,6 +2293,35 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
        return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
+static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+       uint32_t smu_version;
+       int ret;
+
+       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               pr_err("Failed to get smu version!\n");
+               return ret;
+       }
+
+       /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
+       if (smu_version < 0x00361700) {
+               pr_err("XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
+               return -EINVAL;
+       }
+
+       if (en)
+               return smu_send_smc_msg_with_param(smu,
+                                                  SMU_MSG_GmiPwrDnControl,
+                                                  1,
+                                                  NULL);
+
+       return smu_send_smc_msg_with_param(smu,
+                                          SMU_MSG_GmiPwrDnControl,
+                                          0,
+                                          NULL);
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
        /* translate smu index into arcturus specific index */
        .get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2389,6 +2415,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
        .get_pptable_power_limit = arcturus_get_pptable_power_limit,
        .set_df_cstate = arcturus_set_df_cstate,
+       .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
index 689072a312a7fb52a015eedf4bd9d53d9720d029..c9cfe90a29471c6590b34b228711ae98c5e06658 100644 (file)
@@ -36,6 +36,8 @@
 #include "power_state.h"
 #include "soc15_common.h"
 #include "smu10.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
 
 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
 #define SMC_RAM_END                     0x40000
 
-#define mmPWR_MISC_CNTL_STATUS                                 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                                0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT       0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT         0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK         0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK           0x00000006L
-
 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 
 
@@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
                return -EINVAL;
        }
-       smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+       smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
 
        return 0;
 }
@@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
                smu10_data->deep_sleep_dcefclk = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       smu10_data->deep_sleep_dcefclk);
+                                       smu10_data->deep_sleep_dcefclk,
+                                       NULL);
        }
        return 0;
 }
@@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
                smu10_data->dcf_actual_hard_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinDcefclkByFreq,
-                                       smu10_data->dcf_actual_hard_min_freq);
+                                       smu10_data->dcf_actual_hard_min_freq,
+                                       NULL);
        }
        return 0;
 }
@@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
                smu10_data->f_actual_hard_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinFclkByFreq,
-                                       smu10_data->f_actual_hard_min_freq);
+                                       smu10_data->f_actual_hard_min_freq,
+                                       NULL);
        }
        return 0;
 }
@@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count
                smu10_data->num_active_display = count;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetDisplayCount,
-                               smu10_data->num_active_display);
+                               smu10_data->num_active_display,
+                               NULL);
        }
 
        return 0;
@@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
        if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                           PPSMC_MSG_SetGfxCGPG,
-                                                          true);
+                                                          true,
+                                                          NULL);
        else
                return 0;
 }
@@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
 
                /* confirm gfx is back to "on" state */
                while (!smu10_is_gfx_on(hwmgr))
@@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = hwmgr->adev;
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
 
        return 0;
 }
@@ -410,12 +410,10 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
                        struct smu10_voltage_dependency_table **pptable,
                        uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
 {
-       uint32_t table_size, i;
+       uint32_t i;
        struct smu10_voltage_dependency_table *ptable;
 
-       table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
-       ptable = kzalloc(table_size, GFP_KERNEL);
-
+       ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
        if (NULL == ptable)
                return -ENOMEM;
 
@@ -479,12 +477,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
        smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
                                        ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
-       result = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
        smu10_data->gfx_min_freq_limit = result / 10 * 1000;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
-       result = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
        smu10_data->gfx_max_freq_limit = result / 10 * 1000;
 
        return 0;
@@ -588,116 +584,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               SMU10_UMD_PSTATE_GFXCLK);
+                                               SMU10_UMD_PSTATE_GFXCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               SMU10_UMD_PSTATE_FCLK);
+                                               SMU10_UMD_PSTATE_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_SOCCLK);
+                                               SMU10_UMD_PSTATE_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               SMU10_UMD_PSTATE_GFXCLK);
+                                               SMU10_UMD_PSTATE_GFXCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_FCLK);
+                                               SMU10_UMD_PSTATE_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_SOCCLK);
+                                               SMU10_UMD_PSTATE_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               min_sclk);
+                                               min_sclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
                                                SMU10_UMD_PSTATE_PEAK_FCLK :
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_MIN_SOCCLK);
+                                               SMU10_UMD_PSTATE_MIN_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_MIN_VCE);
+                                               SMU10_UMD_PSTATE_MIN_VCE,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK);
+                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK);
+                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE);
+                                               SMU10_UMD_PSTATE_VCE,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               min_mclk);
+                                               min_mclk,
+                                               NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -849,13 +877,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                low == 2 ? data->gfx_max_freq_limit/100 :
                                                low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
-                                               data->gfx_min_freq_limit/100);
+                                               data->gfx_min_freq_limit/100,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxGfxClk,
                                                high == 0 ? data->gfx_min_freq_limit/100 :
                                                high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
-                                               data->gfx_max_freq_limit/100);
+                                               data->gfx_max_freq_limit/100,
+                                               NULL);
                break;
 
        case PP_MCLK:
@@ -864,11 +894,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
-                                               mclk_table->entries[low].clk/100);
+                                               mclk_table->entries[low].clk/100,
+                                               NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               mclk_table->entries[high].clk/100);
+                                               mclk_table->entries[high].clk/100,
+                                               NULL);
                break;
 
        case PP_PCIE:
@@ -888,8 +920,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        switch (type) {
        case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
 
        /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
                if (now == data->gfx_max_freq_limit/100)
@@ -910,8 +941,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        i == 2 ? "*" : "");
                break;
        case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
 
                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -1122,15 +1152,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
-               sclk = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
                        /* in units of 10KHZ */
                *((uint32_t *)value) = sclk * 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
-               mclk = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
                        /* in units of 10KHZ */
                *((uint32_t *)value) = mclk * 100;
                *size = 4;
@@ -1166,20 +1194,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
 {
 
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
 }
 
 static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
 }
 
 static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 {
        if (gate)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
        else
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
 }
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
@@ -1191,11 +1219,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_GATE);
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_PowerDownVcn, 0);
+                                       PPSMC_MSG_PowerDownVcn, 0, NULL);
                smu10_data->vcn_power_gated = true;
        } else {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_PowerUpVcn, 0);
+                                               PPSMC_MSG_PowerUpVcn, 0, NULL);
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_UNGATE);
@@ -1274,8 +1302,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
-       if ((adev->asic_type == CHIP_RAVEN) &&
-           (adev->rev_id != 0x15d8) &&
+       if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
            (hwmgr->smu_version >= 0x41e2b))
                return true;
        else
@@ -1304,7 +1331,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
                hwmgr->gfxoff_state_changed_by_workload = true;
        }
        result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
-                                               1 << workload_type);
+                                               1 << workload_type,
+                                               NULL);
        if (!result)
                hwmgr->power_profile_mode = input[size];
        if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
@@ -1319,13 +1347,13 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                   PPSMC_MSG_DeviceDriverReset,
-                                                  mode);
+                                                  mode,
+                                                  NULL);
 }
 
 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .backend_init = smu10_hwmgr_backend_init,
        .backend_fini = smu10_hwmgr_backend_fini,
-       .asic_setup = NULL,
        .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
        .force_dpm_level = smu10_dpm_force_dpm_level,
        .get_power_state_size = smu10_get_power_state_size,
index 1fb296a996f3a91a1887128cd8c3802e51ca3a6b..0f969de10fabc3a8ace14dd2798a35b529653a27 100644 (file)
@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {
 
 struct smu10_voltage_dependency_table {
        uint32_t count;
-       struct smu10_clock_voltage_dependency_record entries[1];
+       struct smu10_clock_voltage_dependency_record entries[];
 };
 
 struct smu10_clock_voltage_information {
index 683b29a993666513d0330291ef80a11522df381a..f2bda3bcbbde23b1b5963b8f7e3a45b160070122 100644 (file)
@@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr, enable ?
                        PPSMC_MSG_UVDDPM_Enable :
-                       PPSMC_MSG_UVDDPM_Disable);
+                       PPSMC_MSG_UVDDPM_Disable,
+                       NULL);
 }
 
 static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr, enable ?
                        PPSMC_MSG_VCEDPM_Enable :
-                       PPSMC_MSG_VCEDPM_Disable);
+                       PPSMC_MSG_VCEDPM_Disable,
+                       NULL);
 }
 
 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
@@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_uvd_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_UVDPowerOFF);
+                               PPSMC_MSG_UVDPowerOFF,
+                               NULL);
        return 0;
 }
 
@@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                  PHM_PlatformCaps_UVDDynamicPowerGating)) {
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_UVDPowerON, 1);
+                                       PPSMC_MSG_UVDPowerON, 1, NULL);
                } else {
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_UVDPowerON, 0);
+                                       PPSMC_MSG_UVDPowerON, 0, NULL);
                }
        }
 
@@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_VCEPowerOFF);
+                               PPSMC_MSG_VCEPowerOFF,
+                               NULL);
        return 0;
 }
 
@@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_VCEPowerON);
+                               PPSMC_MSG_VCEPowerON,
+                               NULL);
        return 0;
 }
 
@@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_3DCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_3DLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_RLC_LS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_GFX_CP_LS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                                CG_GFX_OTHERS_MGCG_MASK);
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_BIF_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if  (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_BIF_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_MC_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_MC_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_DRM_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_DRM_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_HDP_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_HDP_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_SDMA_MGCG_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
 
@@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_SDMA_MGLS_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                value = CG_SYS_ROM_MASK;
 
                                if (smum_send_msg_to_smc_with_parameter(
-                                               hwmgr, msg, value))
+                                               hwmgr, msg, value, NULL))
                                        return -EINVAL;
                        }
                        break;
@@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
        if (enable)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_GFX_CU_PG_ENABLE,
-                                       adev->gfx.cu_info.number);
+                                       adev->gfx.cu_info.number,
+                                       NULL);
        else
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GFX_CU_PG_DISABLE);
+                               PPSMC_MSG_GFX_CU_PG_DISABLE,
+                               NULL);
 }
index 4795eb66b2b2355fa6deaaddd456f7ade6f8f75f..753cb2cf6b77e668fd4ec93f3eb0e7ce709b0563 100644 (file)
@@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
        }
 
        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
 
        return 0;
 }
@@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
 
 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
 }
 
 /**
@@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_EnableVRHotGPIOInterrupt);
+                               PPSMC_MSG_EnableVRHotGPIOInterrupt,
+                               NULL);
 
        return 0;
 }
@@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        if (data->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
 
        return 0;
 }
@@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        if (data->ulv_supported)
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
 
        return 0;
 }
@@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
-               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                               PPSMC_MSG_MASTER_DeepSleep_OFF,
+                               NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
@@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                               PPSMC_MSG_MASTER_DeepSleep_OFF,
+                               NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
@@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                        smu7_disable_sclk_vce_handshake(hwmgr);
 
                PP_ASSERT_WITH_CODE(
-               (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+               (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
                "Failed to enable SCLK DPM during DPM Start Function!",
                return -EINVAL);
        }
@@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_MCLKDPM_Enable)),
+                                               PPSMC_MSG_MCLKDPM_Enable,
+                                               NULL)),
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -EINVAL);
 
@@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
        if (0 == data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_Enable)),
+                                               PPSMC_MSG_PCIeDPM_Enable,
+                                               NULL)),
                                "Failed to enable pcie DPM during DPM Start Function!",
                                return -EINVAL);
        }
@@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_Falcon_QuickTransition)) {
                PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_EnableACDCGPIOInterrupt)),
+                               PPSMC_MSG_EnableACDCGPIOInterrupt,
+                               NULL)),
                                "Failed to enable AC DC GPIO Interrupt!",
                                );
        }
@@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable SCLK DPM when DPM is disabled",
                                return 0);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
        }
 
        /* disable MCLK dpm */
@@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
                                "Trying to disable MCLK DPM when DPM is disabled",
                                return 0);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
        }
 
        return 0;
@@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
        if (!data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE(
                                (smum_send_msg_to_smc(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                                               PPSMC_MSG_PCIeDPM_Disable,
+                                               NULL) == 0),
                                "Failed to disable pcie DPM during DPM Stop Function!",
                                return -EINVAL);
        }
@@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
                        "Trying to disable voltage DPM when DPM is disabled",
                        return 0);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
 
        return 0;
 }
@@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
-       smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
+       smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
 
        tmp_result = smu7_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
                if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
                        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-                                       hwmgr, PPSMC_MSG_EnableAvfs),
+                                       hwmgr, PPSMC_MSG_EnableAvfs, NULL),
                                        "Failed to enable AVFS!",
                                        return -EINVAL);
                }
        } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
                        CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
                PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-                               hwmgr, PPSMC_MSG_DisableAvfs),
+                               hwmgr, PPSMC_MSG_DisableAvfs, NULL),
                                "Failed to disable AVFS!",
                                return -EINVAL);
        }
@@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
 
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_PCIeDPM_ForceLevel, level);
+                                               PPSMC_MSG_PCIeDPM_ForceLevel, level,
+                                               NULL);
                }
        }
 
@@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                               (1 << level));
+                                               (1 << level),
+                                               NULL);
                }
        }
 
@@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
                        if (level)
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                               (1 << level));
+                                               (1 << level),
+                                               NULL);
                }
        }
 
@@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask,
+                                       NULL);
        }
 
        if (!data->mclk_dpm_key_disabled) {
                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask,
+                                       NULL);
        }
 
        return 0;
@@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 
        if (!data->pcie_dpm_key_disabled) {
                smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_PCIeDPM_UnForceLevel);
+                               PPSMC_MSG_PCIeDPM_UnForceLevel,
+                               NULL);
        }
 
        return smu7_upload_dpm_level_enable_mask(hwmgr);
@@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.sclk_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                                           (1 << level));
+                                                           (1 << level),
+                                                           NULL);
 
        }
 
@@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.mclk_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                                           (1 << level));
+                                                           (1 << level),
+                                                           NULL);
                }
        }
 
@@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                                                              data->dpm_level_enable_mask.pcie_dpm_enable_mask);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                            PPSMC_MSG_PCIeDPM_ForceLevel,
-                                                           (level));
+                                                           (level),
+                                                           NULL);
                }
        }
 
@@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
            (adev->asic_type != CHIP_BONAIRE) &&
            (adev->asic_type != CHIP_FIJI) &&
            (adev->asic_type != CHIP_TONGA)) {
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-               tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
                *query = tmp;
 
                if (tmp != 0)
                        return 0;
        }
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                                        ixSMU_PM_STATUS_95, 0);
 
        for (i = 0; i < 10; i++) {
                msleep(500);
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
                tmp = cgs_read_ind_register(hwmgr->device,
                                                CGS_IND_REG__SMC,
                                                ixSMU_PM_STATUS_95);
@@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
-               sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
                *((uint32_t *)value) = sclk;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
-               mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
                *((uint32_t *)value) = mclk;
                *size = 4;
                return 0;
@@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to freeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SCLKDPM_FreezeLevel),
+                               PPSMC_MSG_SCLKDPM_FreezeLevel,
+                               NULL),
                                "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
                                return -EINVAL);
        }
@@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to freeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MCLKDPM_FreezeLevel),
+                               PPSMC_MSG_MCLKDPM_FreezeLevel,
+                               NULL),
                                "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
                                return -EINVAL);
        }
@@ -3884,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to Unfreeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+                               PPSMC_MSG_SCLKDPM_UnfreezeLevel,
+                               NULL),
                        "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
                        return -EINVAL);
        }
@@ -3896,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to Unfreeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+                               PPSMC_MSG_MCLKDPM_UnfreezeLevel,
+                               NULL),
                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
                    return -EINVAL);
        }
@@ -3949,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
        if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
                if (hwmgr->chip_id == CHIP_VEGAM)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
+                                       NULL);
                else
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+                                       (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
+                                       NULL);
        }
-       return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
+       return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
 }
 
 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -4040,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
        advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+                       PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
+                       NULL);
 }
 
 static int
@@ -4048,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
-       return (smum_send_msg_to_smc(hwmgr, msg) == 0) ?  0 : -1;
+       return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ?  0 : -1;
 }
 
 static int
@@ -4132,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
        advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+                       PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
+                       NULL);
 }
 
 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
@@ -4262,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12))
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
        } else {
                data->mem_latency_high = 330;
                data->mem_latency_low = 330;
                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12))
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
        }
 
        return 0;
@@ -4413,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                if (!data->sclk_dpm_key_disabled)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+                                       data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
+                                       NULL);
                break;
        case PP_MCLK:
                if (!data->mclk_dpm_key_disabled)
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
-                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+                                       data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
+                                       NULL);
                break;
        case PP_PCIE:
        {
@@ -4427,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 
                if (!data->pcie_dpm_key_disabled) {
                        if (fls(tmp) != ffs(tmp))
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
+                                               NULL);
                        else
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_PCIeDPM_ForceLevel,
-                                       fls(tmp) - 1);
+                                       fls(tmp) - 1,
+                                       NULL);
                }
                break;
        }
@@ -4457,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        switch (type) {
        case PP_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
 
                for (i = 0; i < sclk_table->count; i++) {
                        if (clock > sclk_table->dpm_levels[i].value)
@@ -4473,8 +4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        (i == now) ? "*" : "");
                break;
        case PP_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
-               clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
 
                for (i = 0; i < mclk_table->count; i++) {
                        if (clock > mclk_table->dpm_levels[i].value)
index 58f5589aaf126add63407b058e80e7d4bd2a4319..5d4971576111e1fcbde8036a86cb81b8147e74ee 100644 (file)
@@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
        didt_block |= block_en << TCP_Enable_SHIFT;
 
        if (enable)
-               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                               PPSMC_MSG_Didt_Block_Function,
+                                               didt_block,
+                                               NULL);
 
        return result;
 }
@@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 
                if (hwmgr->chip_id == CHIP_POLARIS11) {
                        result = smum_send_msg_to_smc(hwmgr,
-                                               (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+                                               (uint16_t)(PPSMC_MSG_EnableDpmDidt),
+                                               NULL);
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to enable DPM DIDT.", goto error);
                }
@@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
                                goto error);
                if (hwmgr->chip_id == CHIP_POLARIS11) {
                        result = smum_send_msg_to_smc(hwmgr,
-                                               (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+                                               (uint16_t)(PPSMC_MSG_DisableDpmDidt),
+                                               NULL);
                        PP_ASSERT_WITH_CODE((0 == result),
                                        "Failed to disable DPM DIDT.", goto error);
                }
@@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
        if (PP_CAP(PHM_PlatformCaps_CAC)) {
                int smc_result;
                smc_result = smum_send_msg_to_smc(hwmgr,
-                               (uint16_t)(PPSMC_MSG_EnableCac));
+                               (uint16_t)(PPSMC_MSG_EnableCac),
+                               NULL);
                PP_ASSERT_WITH_CODE((0 == smc_result),
                                "Failed to enable CAC in SMC.", result = -1);
 
@@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
 
        if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
                int smc_result = smum_send_msg_to_smc(hwmgr,
-                               (uint16_t)(PPSMC_MSG_DisableCac));
+                               (uint16_t)(PPSMC_MSG_DisableCac),
+                               NULL);
                PP_ASSERT_WITH_CODE((smc_result == 0),
                                "Failed to disable CAC in SMC.", result = -1);
 
@@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
        if (data->power_containment_features &
                        POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_PkgPwrSetLimit, n<<8);
+                               PPSMC_MSG_PkgPwrSetLimit,
+                               n<<8,
+                               NULL);
        return 0;
 }
 
@@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
                                                uint32_t target_tdp)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+                       PPSMC_MSG_OverDriveSetTargetTdp,
+                       target_tdp,
+                       NULL);
 }
 
 int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
@@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
        if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
                if (data->enable_tdc_limit_feature) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+                                       (uint16_t)(PPSMC_MSG_TDCLimitEnable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((0 == smc_result),
                                        "Failed to enable TDCLimit in SMC.", result = -1;);
                        if (0 == smc_result)
@@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
 
                if (data->enable_pkg_pwr_tracking_feature) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((0 == smc_result),
                                        "Failed to enable PkgPwrTracking in SMC.", result = -1;);
                        if (0 == smc_result) {
@@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_TDCLimit) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable TDCLimit in SMC.",
                                        result = smc_result);
@@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_DTE) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                                       (uint16_t)(PPSMC_MSG_DisableDTE),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable DTE in SMC.",
                                        result = smc_result);
@@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
                if (data->power_containment_features &
                                POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
                        smc_result = smum_send_msg_to_smc(hwmgr,
-                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable),
+                                       NULL);
                        PP_ASSERT_WITH_CODE((smc_result == 0),
                                        "Failed to disable PkgPwrTracking in SMC.",
                                        result = smc_result);
index 5bdc0df5a9f42c0aeef45053f5849a35de963caf..0b30f73649a8384ea7fe0498d1dc2a61db995eff 100644 (file)
@@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
        int result;
 
        if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+                                       FAN_CONTROL_FUZZY, NULL);
 
                if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
                        hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
@@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
                                        advanceFanControlParameters.usMaxFanPWM);
 
        } else {
-               cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+               result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+                                       FAN_CONTROL_TABLE, NULL);
        }
 
        if (!result && hwmgr->thermal_controller.
@@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
                result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucTargetTemperature);
+                               advanceFanControlParameters.ucTargetTemperature,
+                               NULL);
        hwmgr->fan_ctrl_enabled = true;
 
        return result;
@@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
        hwmgr->fan_ctrl_enabled = false;
-       return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL);
 }
 
 /**
@@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
                        CG_THERMAL_INT, THERM_INT_MASK, alert);
 
        /* send message to SMU to enable internal thermal interrupts */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL);
 }
 
 /**
@@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
                        CG_THERMAL_INT, THERM_INT_MASK, alert);
 
        /* send message to SMU to disable internal thermal interrupts */
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL);
 }
 
 /**
index 019d6a206492b05604e42678d5d41a87f6c4e74a..a6c6a793e98eef8d413c434ccfdd099c2d6b41a5 100644 (file)
@@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
 
        if (data->max_sclk_level == 0) {
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
-               data->max_sclk_level = smum_get_argument(hwmgr) + 1;
+               smum_send_msg_to_smc(hwmgr,
+                               PPSMC_MSG_GetMaxSclkLevel,
+                               &data->max_sclk_level);
+               data->max_sclk_level += 1;
        }
 
        return data->max_sclk_level;
@@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_uvd_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
        data->uvd_dpm.soft_min_clk = 0;
        data->uvd_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].vclk;
@@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_vce_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
        data->vce_dpm.soft_min_clk = 0;
        data->vce_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].ecclk;
@@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_acp_clock_voltage_dependency_table *table =
                                hwmgr->dyn_state.acp_clock_voltage_dependency_table;
-       unsigned long clock = 0, level;
+       unsigned long clock = 0;
+       uint32_t level;
 
        if (NULL == table || table->count <= 0)
                return -EINVAL;
@@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
        data->acp_dpm.soft_min_clk = 0;
        data->acp_dpm.hard_min_clk = 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
-       level = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
 
        if (level < table->count)
                clock = table->entries[level].acpclk;
@@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
 #ifdef CONFIG_DRM_AMD_ACP
        data->acp_power_gated = false;
 #else
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
        data->acp_power_gated = true;
 #endif
 
@@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkHardMin,
                                                 smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.hard_min_clk,
-                                            PPSMC_MSG_SetSclkHardMin));
+                                            PPSMC_MSG_SetSclkHardMin),
+                                                NULL);
        }
 
        clock = data->sclk_dpm.soft_min_clk;
@@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkSoftMin,
                                                smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_min_clk,
-                                            PPSMC_MSG_SetSclkSoftMin));
+                                            PPSMC_MSG_SetSclkSoftMin),
+                                               NULL);
        }
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
                                                PPSMC_MSG_SetSclkSoftMax,
                                                smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_max_clk,
-                                       PPSMC_MSG_SetSclkSoftMax));
+                                       PPSMC_MSG_SetSclkSoftMax),
+                                               NULL);
        }
 
        return 0;
@@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepSclk,
-                               clks);
+                               clks,
+                               NULL);
        }
 
        return 0;
@@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetWatermarkFrequency,
-                                       data->sclk_dpm.soft_max_clk);
+                                       data->sclk_dpm.soft_max_clk,
+                                       NULL);
 
        return 0;
 }
@@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable,
 
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_EnableLowMemoryPstate,
-                                               (lock ? 1 : 0));
+                                               (lock ? 1 : 0),
+                                               NULL);
                } else {
                        PP_DBG_LOG("disable Low Memory PState.\n");
 
                        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_DisableLowMemoryPstate,
-                                               (lock ? 1 : 0));
+                                               (lock ? 1 : 0),
+                                               NULL);
                }
        }
 
@@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
                ret = smum_send_msg_to_smc_with_parameter(
                                                          hwmgr,
                                                          PPSMC_MSG_DisableAllSmuFeatures,
-                                                         dpm_features);
+                                                         dpm_features,
+                                                         NULL);
                if (ret == 0)
                        data->is_nb_dpm_enabled = false;
        }
@@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
                ret = smum_send_msg_to_smc_with_parameter(
                                                          hwmgr,
                                                          PPSMC_MSG_EnableAllSmuFeatures,
-                                                         dpm_features);
+                                                         dpm_features,
+                                                         NULL);
                if (ret == 0)
                        data->is_nb_dpm_enabled = true;
        }
@@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableAllSmuFeatures,
-                               SCLK_DPM_MASK);
+                               SCLK_DPM_MASK,
+                               NULL);
 }
 
 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
                data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
                ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DisableAllSmuFeatures,
-                                       dpm_features);
+                                       dpm_features,
+                                       NULL);
        }
        return ret;
 }
@@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
                                        PPSMC_MSG_SetSclkSoftMin,
                                        smu8_get_sclk_level(hwmgr,
                                        data->sclk_dpm.soft_max_clk,
-                                       PPSMC_MSG_SetSclkSoftMin));
+                                       PPSMC_MSG_SetSclkSoftMin),
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_max_clk,
-                               PPSMC_MSG_SetSclkSoftMax));
+                               PPSMC_MSG_SetSclkSoftMax),
+                               NULL);
 
        return 0;
 }
@@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                        PPSMC_MSG_SetSclkSoftMax,
                        smu8_get_sclk_level(hwmgr,
                        data->sclk_dpm.soft_min_clk,
-                       PPSMC_MSG_SetSclkSoftMax));
+                       PPSMC_MSG_SetSclkSoftMax),
+                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMin,
                                smu8_get_sclk_level(hwmgr,
                                data->sclk_dpm.soft_min_clk,
-                               PPSMC_MSG_SetSclkSoftMin));
+                               PPSMC_MSG_SetSclkSoftMin),
+                               NULL);
 
        return 0;
 }
@@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
-               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
        return 0;
 }
 
@@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
                return smum_send_msg_to_smc_with_parameter(
                        hwmgr,
                        PPSMC_MSG_UVDPowerON,
-                       PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+                       PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
+                       NULL);
        }
 
        return 0;
@@ -1259,15 +1281,20 @@ static int  smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                        PPSMC_MSG_SetEclkHardMin,
                        smu8_get_eclk_level(hwmgr,
                                data->vce_dpm.hard_min_clk,
-                               PPSMC_MSG_SetEclkHardMin));
+                               PPSMC_MSG_SetEclkHardMin),
+                       NULL);
        } else {
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_SetEclkHardMin, 0);
+                                       PPSMC_MSG_SetEclkHardMin,
+                                       0,
+                                       NULL);
                /* disable ECLK DPM 0. Otherwise VCE could hang if
                 * switching SCLK from DPM 0 to 6/7 */
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_SetEclkSoftMin, 1);
+                                       PPSMC_MSG_SetEclkSoftMin,
+                                       1,
+                                       NULL);
        }
        return 0;
 }
@@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
-                                                    PPSMC_MSG_VCEPowerOFF);
+                                           PPSMC_MSG_VCEPowerOFF,
+                                           NULL);
        return 0;
 }
 
@@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
-                                                    PPSMC_MSG_VCEPowerON);
+                                           PPSMC_MSG_VCEPowerON,
+                                           NULL);
        return 0;
 }
 
@@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg(
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetDisplaySizePowerParams,
-                                               data);
+                                               data,
+                                               NULL);
        }
 
        return 0;
@@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
        case PP_SCLK:
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMin,
-                               mask);
+                               mask,
+                               NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSclkSoftMax,
-                               mask);
+                               mask,
+                               NULL);
                break;
        default:
                break;
@@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *((uint32_t *)value) = 0;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+               result = smum_send_msg_to_smc(hwmgr,
+                               PPSMC_MSG_GetAverageGraphicsActivity,
+                               &activity_percent);
                if (0 == result) {
-                       activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
                        activity_percent = activity_percent > 100 ? 100 : activity_percent;
                } else {
                        activity_percent = 50;
@@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrHiVirtual,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrLoVirtual,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrHiPhysical,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramAddrLoPhysical,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramBufferSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
                data->dpm_flags |= DPMFlags_UVD_Enabled;
                dpm_features |= UVD_DPM_MASK;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                           PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+                           PPSMC_MSG_EnableAllSmuFeatures,
+                           dpm_features,
+                           NULL);
        } else {
                dpm_features |= UVD_DPM_MASK;
                data->dpm_flags &= ~DPMFlags_UVD_Enabled;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                          PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+                          PPSMC_MSG_DisableAllSmuFeatures,
+                          dpm_features,
+                          NULL);
        }
        return 0;
 }
@@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
                                PPSMC_MSG_SetUvdHardMin,
                                smu8_get_uvd_level(hwmgr,
                                        data->uvd_dpm.hard_min_clk,
-                                       PPSMC_MSG_SetUvdHardMin));
+                                       PPSMC_MSG_SetUvdHardMin),
+                               NULL);
 
                        smu8_enable_disable_uvd_dpm(hwmgr, true);
                } else {
@@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                data->dpm_flags |= DPMFlags_VCE_Enabled;
                dpm_features |= VCE_DPM_MASK;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                           PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+                           PPSMC_MSG_EnableAllSmuFeatures,
+                           dpm_features,
+                           NULL);
        } else {
                dpm_features |= VCE_DPM_MASK;
                data->dpm_flags &= ~DPMFlags_VCE_Enabled;
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                          PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+                          PPSMC_MSG_DisableAllSmuFeatures,
+                          dpm_features,
+                          NULL);
        }
 
        return 0;
@@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
                return;
 
        if (bgate)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
        else
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
 }
 
 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
index d09690fca45205894e706bc866fa29ee5ff65640..60b5ca974356aa4e4d088bee04330ee3ac4f87d4 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/reboot.h>
 
 #include "hwmgr.h"
 #include "pp_debug.h"
@@ -557,7 +558,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
                if (req_vddc <= vddc_table->entries[i].vddc) {
                        req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_VddC_Request, req_volt);
+                                       PPSMC_MSG_VddC_Request,
+                                       req_volt,
+                                       NULL);
                        return;
                }
        }
@@ -593,37 +596,43 @@ int phm_irq_process(struct amdgpu_device *adev,
        uint32_t src_id = entry->src_id;
 
        if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
-               if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
-                       pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
-                                               PCI_BUS_NUM(adev->pdev->devfn),
-                                               PCI_SLOT(adev->pdev->devfn),
-                                               PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
-                       pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
-                                       PCI_BUS_NUM(adev->pdev->devfn),
-                                       PCI_SLOT(adev->pdev->devfn),
-                                       PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
-                       pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
-                                       PCI_BUS_NUM(adev->pdev->devfn),
-                                       PCI_SLOT(adev->pdev->devfn),
-                                       PCI_FUNC(adev->pdev->devfn));
+               if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
+                       dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+                       orderly_poweroff(true);
+               } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+                       dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+               else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+                       dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+                       /*
+                        * HW CTF just occurred. Shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+                       orderly_poweroff(true);
+               }
        } else if (client_id == SOC15_IH_CLIENTID_THM) {
-               if (src_id == 0)
-                       pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
-                                               PCI_BUS_NUM(adev->pdev->devfn),
-                                               PCI_SLOT(adev->pdev->devfn),
-                                               PCI_FUNC(adev->pdev->devfn));
-               else
-                       pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
-                                       PCI_BUS_NUM(adev->pdev->devfn),
-                                       PCI_SLOT(adev->pdev->devfn),
-                                       PCI_FUNC(adev->pdev->devfn));
-       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
-               pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
-                               PCI_BUS_NUM(adev->pdev->devfn),
-                               PCI_SLOT(adev->pdev->devfn),
-                               PCI_FUNC(adev->pdev->devfn));
+               if (src_id == 0) {
+                       dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+                       orderly_poweroff(true);
+               } else
+                       dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+               dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+               /*
+                * HW CTF just occurred. Shutdown to prevent further damage.
+                */
+               dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+               orderly_poweroff(true);
+       }
 
        return 0;
 }
index d168af4a4d7815f07024bf8609ae71fcdb18b882..46bb16c29cf68f1ac7520ca72cb35e0da52f9cde 100644 (file)
@@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
        if (state == BACO_STATE_IN) {
                if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
                                             ARRAY_SIZE(pre_baco_tbl))) {
-                       if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
+                       if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL))
                                return -EINVAL;
 
                        if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
index f29f95be1e56288d7d8affe84de74f4776d26cff..675c7cab7cfc5992121aaab273e02e9df567a07a 100644 (file)
@@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        if (data->registry_data.vr0hot_enabled)
                data->smu_features[GNLD_VR0HOT].supported = true;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetSmuVersion,
+                       &hwmgr->smu_version);
                /* ACG firmware has major version 5 */
        if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
                data->smu_features[GNLD_ACG].supported = true;
@@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
                data->smu_features[GNLD_PCC_LIMIT].supported = true;
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
                        "Failed to set up led dpm config!",
                        return -EINVAL);
 
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_NumOfDisplays,
+                               0,
+                               NULL);
 
        return 0;
 }
@@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
                                        data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
                        data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
-               agc_btc_response = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
 
                if (1 == agc_btc_response) {
                        if (1 == data->acg_loop_state)
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
                        else if (2 == data->acg_loop_state)
-                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+                               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
                        if (0 == vega10_enable_smc_features(hwmgr, true,
                                data->smu_features[GNLD_ACG].smu_feature_bitmap))
                                        data->smu_features[GNLD_ACG].enabled = true;
@@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
        struct vega10_hwmgr *data = hwmgr->backend;
        AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        serial_number = ((uint64_t)bottom32 << 32) | top32;
 
@@ -2610,14 +2609,16 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
                if (0 != boot_up_values.usVddc) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetFloorSocVoltage,
-                                               (boot_up_values.usVddc * 4));
+                                               (boot_up_values.usVddc * 4),
+                                               NULL);
                        data->vbios_boot_state.bsoc_vddc_lock = true;
                } else {
                        data->vbios_boot_state.bsoc_vddc_lock = false;
                }
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
-                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                               NULL);
        }
 
        result = vega10_populate_avfs_parameters(hwmgr);
@@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 
        if (data->vbios_boot_state.bsoc_vddc_lock) {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_SetFloorSocVoltage, 0);
+                                               PPSMC_MSG_SetFloorSocVoltage, 0,
+                                               NULL);
                data->vbios_boot_state.bsoc_vddc_lock = false;
        }
 
@@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                vega10_enable_disable_PCC_limit_feature(hwmgr, true);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+                       PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
+                       NULL);
 
                tmp_result = vega10_construct_voltage_tables(hwmgr);
                PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                data->dpm_table.gfx_table.dpm_state.soft_min_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinGfxclkByIndex,
-                               data->smc_state_table.gfx_boot_level);
+                               data->smc_state_table.gfx_boot_level,
+                               NULL);
 
                        data->dpm_table.gfx_table.dpm_state.soft_min_level =
                                        data->smc_state_table.gfx_boot_level;
@@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMinSocclkByIndex,
-                                               socclk_idx);
+                                               socclk_idx,
+                                               NULL);
                        } else {
                                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMinUclkByIndex,
-                                               data->smc_state_table.mem_boot_level);
+                                               data->smc_state_table.mem_boot_level,
+                                               NULL);
                        }
                        data->dpm_table.mem_table.dpm_state.soft_min_level =
                                        data->smc_state_table.mem_boot_level;
@@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
                                data->dpm_table.soc_table.dpm_state.soft_min_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinSocclkByIndex,
-                               data->smc_state_table.soc_boot_level);
+                               data->smc_state_table.soc_boot_level,
+                               NULL);
                        data->dpm_table.soc_table.dpm_state.soft_min_level =
                                        data->smc_state_table.soc_boot_level;
                }
@@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.gfx_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxGfxclkByIndex,
-                               data->smc_state_table.gfx_max_level);
+                               data->smc_state_table.gfx_max_level,
+                               NULL);
                        data->dpm_table.gfx_table.dpm_state.soft_max_level =
                                        data->smc_state_table.gfx_max_level;
                }
@@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.mem_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSoftMaxUclkByIndex,
-                                       data->smc_state_table.mem_max_level);
+                                       data->smc_state_table.mem_max_level,
+                                       NULL);
                        data->dpm_table.mem_table.dpm_state.soft_max_level =
                                        data->smc_state_table.mem_max_level;
                }
@@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                        data->dpm_table.soc_table.dpm_state.soft_max_level) {
                        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMaxSocclkByIndex,
-                               data->smc_state_table.soc_max_level);
+                               data->smc_state_table.soc_max_level,
+                               NULL);
                        data->dpm_table.soc_table.dpm_state.soft_max_level =
                                        data->smc_state_table.soc_max_level;
                }
@@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
                /* This message will also enable SmcToHost Interrupt */
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetLowGfxclkInterruptThreshold,
-                               (uint32_t)low_sclk_interrupt_threshold);
+                               (uint32_t)low_sclk_interrupt_threshold,
+                               NULL);
        }
 
        return 0;
@@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
        if (!query)
                return -EINVAL;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
-       value = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
 
        /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
        *query = value << 8;
@@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
-               sclk_mhz = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
                *((uint32_t *)value) = sclk_mhz * 100;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
-               mclk_idx = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
                if (mclk_idx < dpm_table->mem_table.count) {
                        *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
                        *size = 4;
@@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                }
                break;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
-               activity_percent = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
+                                               &activity_percent);
                *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
                *size = 4;
                break;
@@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
-               *((uint32_t *)value) = smum_get_argument(hwmgr) *
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
+               *((uint32_t *)value) = *((uint32_t *)value) *
                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MEM_TEMP:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
-               *((uint32_t *)value) = smum_get_argument(hwmgr) *
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
+               *((uint32_t *)value) = *((uint32_t *)value) *
                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
                *size = 4;
                break;
@@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
+                       has_disp ? 1 : 0,
+                       NULL);
 }
 
 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                clk_request = (clk_freq << 16) | clk_select;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_RequestDisplayClockByFreq,
-                               clk_request);
+                               clk_request,
+                               NULL);
        }
 
        return result;
@@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
                if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
                        smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR / 100);
+                                       min_clocks.dcefClockInSR / 100,
+                                       NULL);
                } else {
                        pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
                }
@@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
 
        if (min_clocks.memoryClock != 0) {
                idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
+                                               NULL);
                data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
        }
 
@@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.sclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
 
                if (hwmgr->pp_one_vf &&
                    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.mclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
 
                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                if (data->registry_data.socclk_dpm_key_disabled)
                        break;
 
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
 
                for (i = 0; i < soc_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        break;
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
-               now = smum_get_argument(hwmgr);
+                               PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
 
                for (i = 0; i < dcef_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        "*" : "");
                break;
        case PP_PCIE:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
-               now = smum_get_argument(hwmgr);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
 
                for (i = 0; i < pcie_table->count; i++)
                        size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 
        if (data->water_marks_bitmap & WaterMarksLoaded) {
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+                       NULL);
        }
 
        return result;
@@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetCustomGfxDpmParameters,
                                        busy_set_point | FPS<<8 |
-                                       use_rlc_busy << 16 | min_active_level<<24);
+                                       use_rlc_busy << 16 | min_active_level<<24,
+                                       NULL);
        }
 
 out:
        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
-                                               1 << power_profile_mode);
+                                               1 << power_profile_mode,
+                                               NULL);
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
@@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
index 0a677d4bc87b9a4621cef0edae6785929e03e63d..9757d47dd6b815bfe208676b435551b830fce5ff 100644 (file)
@@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg   PSMSEEDCStallDelayConfig_Vega10[] =
        {   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg   PSMSEEDCThresholdConfig_Vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                 Shift                                                  Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       /* SQ EDC THRESHOLD */
-       {   ixDIDT_SQ_EDC_THRESHOLD,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,            0x0000 },
-
-       {   0xFFFFFFFF  }  /* End of list */
-};
-
 static const struct vega10_didt_config_reg   PSMSEEDCCtrlResetConfig_Vega10[] =
 {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg   PSMSEEDCCtrlConfig_Vega10[] =
        {   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg   PSMGCEDCThresholdConfig_vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- *      Offset                             Mask                                                 Shift                                                  Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
-       {   mmGC_EDC_THRESHOLD,                GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK,                GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,                 0x0000000 },
-
-       {   0xFFFFFFFF  }  /* End of list */
-};
-
 static const struct vega10_didt_config_reg   PSMGCEDCDroopCtrlConfig_vega10[] =
 {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
 
        /* For Vega10, SMC does not support any mask yet. */
        if (enable)
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info,
+                                               NULL);
 
 }
 
@@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 
        if (data->registry_data.enable_pkg_pwr_tracking_feature)
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetPptLimit, n);
+                               PPSMC_MSG_SetPptLimit, n,
+                               NULL);
 
        return 0;
 }
@@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
index ba8763daa3808b2a6fcd57f7ff6315d6bc8c3e5e..7783c7fd7ccb0a58e906d0440f25eab9e2612315 100644 (file)
@@ -31,8 +31,7 @@
 
 static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 {
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
-       *current_rpm = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm);
        return 0;
 }
 
@@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        table->FanPwmMin = hwmgr->thermal_controller.
                        advanceFanControlParameters.usPWMMin * 255 / 100;
index 9d8ca94a8f0c1ef9b16bd9b41d889e54ae971683..bc53cce4f32d3995ba132d4b47079bd200282f9d 100644 (file)
@@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
        if (state == BACO_STATE_IN) {
                if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
                                             ARRAY_SIZE(pre_baco_tbl))) {
-                       if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+                       if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL))
                                return -EINVAL;
 
                        if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
index aca61d1ff3c2a692b8d7bc7bed17b80e35154cf5..f4d1692cccf3a388b013b463f676941b1ee42902 100644 (file)
@@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        }
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | 0xFF));
+                       (clk_id << 16 | 0xFF),
+                       num_of_levels);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetNumOfDpmLevel] failed to get dpm levels!",
                        return ret);
 
-       *num_of_levels = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
-                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
         *Lower 16 bits specify the level
         */
        PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
+               clock) == 0,
                "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
                return -EINVAL);
 
-       *clock = smum_get_argument(hwmgr);
-
        return 0;
 }
 
@@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
                data->vbios_boot_state.vclock = boot_up_values.ulVClk;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
-                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+                       (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                               NULL);
        }
 
        memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
        uint32_t result;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
                "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
                return -EINVAL);
 
-       result = smum_get_argument(hwmgr);
        PP_ASSERT_WITH_CODE(result == 1,
                        "Failed to run ACG BTC!", return -EINVAL);
 
@@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
                                (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
+                       NULL) == 0,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
                return -1);
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
+                       NULL) == 0,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
                return -1);
 
@@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        bool enabled;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
                "[EnableAllSMUFeatures] Failed to enable all smu features!",
                return -1);
 
@@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
        bool enabled;
 
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
                "[DisableAllSMUFeatures] Failed to disable all smu features!",
                return -1);
 
@@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
@@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
 {
        /* AC Max */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
+                       &(clock->ACMax)) == 0,
                "[GetClockRanges] Failed to get max ac clock from SMC!",
                return -EINVAL);
-       clock->ACMax = smum_get_argument(hwmgr);
 
        /* AC Min */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
+                       &(clock->ACMin)) == 0,
                "[GetClockRanges] Failed to get min ac clock from SMC!",
                return -EINVAL);
-       clock->ACMin = smum_get_argument(hwmgr);
 
        /* DC Max */
        PP_ASSERT_WITH_CODE(
-               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
+                       &(clock->DCMax)) == 0,
                "[GetClockRanges] Failed to get max dc clock from SMC!",
                return -EINVAL);
-       clock->DCMax = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        int tmp_result, result = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        result = vega12_set_allowed_featuresmask(hwmgr);
        PP_ASSERT_WITH_CODE(result == 0,
@@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min gfxclk !",
                                        return ret);
        }
@@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min memclk !",
                                        return ret);
 
                min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min memclk !",
                                        return ret);
        }
@@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min vclk!",
                                        return ret);
 
@@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min dclk!",
                                        return ret);
        }
@@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min eclk!",
                                        return ret);
        }
@@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min socclk!",
                                        return ret);
        }
@@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min dcefclk!",
                                        return ret);
        }
@@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max gfxclk!",
                                        return ret);
        }
@@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max memclk!",
                                        return ret);
        }
@@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max vclk!",
                                        return ret);
 
                max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max dclk!",
                                        return ret);
        }
@@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max eclk!",
                                        return ret);
        }
@@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max socclk!",
                                        return ret);
        }
@@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
        *gfx_freq = 0;
 
        PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
+                       &gfx_clk) == 0,
                        "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
                        return -EINVAL);
-       gfx_clk = smum_get_argument(hwmgr);
 
        *gfx_freq = gfx_clk * 100;
 
@@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
        *mclk_freq = 0;
 
        PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
+                               &mem_clk) == 0,
                        "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
                        return -EINVAL);
-       mem_clk = smum_get_argument(hwmgr);
 
        *mclk_freq = mem_clk * 100;
 
@@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
        if (data->smu_features[GNLD_DPM_UCLK].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
+                       has_disp ? 1 : 0,
+                       NULL);
 
        return 0;
 }
@@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                        clk_request = (clk_select << 16) | clk_freq;
                        result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinByFreq,
-                                       clk_request);
+                                       clk_request,
+                                       NULL);
                }
        }
 
@@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
                                PP_ASSERT_WITH_CODE(
                                        !smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR /100),
+                                       min_clocks.dcefClockInSR /100,
+                                       NULL),
                                        "Attempt to set divider for DCEFCLK Failed!",
                                        return -1);
                } else {
@@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        case PP_SOCCLK:
                PP_ASSERT_WITH_CODE(
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
+                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
+                                       &now) == 0,
                                "Attempt to get Current SOCCLK Frequency Failed!",
                                return -EINVAL);
-               now = smum_get_argument(hwmgr);
 
                PP_ASSERT_WITH_CODE(
                                vega12_get_socclocks(hwmgr, &clocks) == 0,
@@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        case PP_DCEFCLK:
                PP_ASSERT_WITH_CODE(
                                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
+                                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
+                                       &now) == 0,
                                "Attempt to get Current DCEFCLK Frequency Failed!",
                                return -EINVAL);
-               now = smum_get_argument(hwmgr);
 
                PP_ASSERT_WITH_CODE(
                                vega12_get_dcefclocks(hwmgr, &clocks) == 0,
@@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
                                return ret);
        }
@@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0,
+                       NULL);
 
        ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
                        &data->dpm_table.mem_table);
@@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
                data->smu_features[GNLD_DPM_DCEFCLK].supported &&
                data->smu_features[GNLD_DPM_SOCCLK].supported)
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+                       PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+                       NULL);
 
        return result;
 }
@@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (data->gfxoff_controlled_by_driver)
-               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
 
        return ret;
 }
@@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (data->gfxoff_controlled_by_driver)
-               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
 
        return ret;
 }
@@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
index 904eb2c9155b4b45ff21daee35eed31146cdd79f..c85806a6f62e3f6dcb185a9e6e6d38bbd9bf646e 100644 (file)
 static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 {
        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GetCurrentRpm),
+                               PPSMC_MSG_GetCurrentRpm,
+                               current_rpm),
                        "Attempt to get current RPM from SMC Failed!",
                        return -EINVAL);
-       *current_rpm = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        return ret;
 }
index 9b5e72bdceca5ad1c8dd10285128bc00d811af31..2a28c9df15a02070eddaf764b5e526214df1db1f 100644 (file)
@@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                        WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
 
                        if(smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_EnterBaco, 0))
+                                       PPSMC_MSG_EnterBaco, 0, NULL))
                                return -EINVAL;
                } else {
                        if(smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_EnterBaco, 1))
+                                       PPSMC_MSG_EnterBaco, 1, NULL))
                                return -EINVAL;
                }
 
        } else if (state == BACO_STATE_OUT) {
-               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
+               if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL))
                        return -EINVAL;
                if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
                                                     ARRAY_SIZE(clean_baco_tbl)))
@@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
        if (ret)
                return ret;
 
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL);
 }
index 08b6ba39a6d7c3cdaa088486c7e26b17488f1683..9ff470f1b826cd570339eba862827b6c413bc659 100644 (file)
@@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         */
        data->registry_data.disallowed_features = 0xE0041C00;
        /* ECC feature should be disabled on old SMUs */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
        if (hwmgr->smu_version < 0x282100)
                data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
 
@@ -400,10 +399,8 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        }
 
        /* Get the SN to turn into a Unique ID */
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
-       top32 = smum_get_argument(hwmgr);
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
-       bottom32 = smum_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
 
        adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
 }
@@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | 0xFF));
+                       (clk_id << 16 | 0xFF),
+                       num_of_levels);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetNumOfDpmLevel] failed to get dpm levels!",
                        return ret);
 
-       *num_of_levels = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
-                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDpmFreqByIndex,
-                       (clk_id << 16 | index));
+                       (clk_id << 16 | index),
+                       clk);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetDpmFreqByIndex] failed to get dpm freq by index!",
                        return ret);
 
-       *clk = smum_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE(*clk,
-                       "[GetDpmFreqByIndex] clk value is invalid!",
-                       return -EINVAL);
-
        return ret;
 }
 
@@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetMinDeepSleepDcefclk,
-               (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+               (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+                       NULL);
 
        memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
 
@@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
         */
        smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
+                       PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                       NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[OverridePcieParameters] Attempt to override pcie params failed!",
                return ret);
@@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
                                  & 0xFFFFFFFF));
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
+               PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
                return ret);
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
+               PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
        PP_ASSERT_WITH_CODE(!ret,
                "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
                return ret);
@@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 
 static int vega20_run_btc(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
 }
 
 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
 {
-       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
+       return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
 }
 
 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_EnableAllSmuFeatures)) == 0,
+                       PPSMC_MSG_EnableAllSmuFeatures,
+                       NULL)) == 0,
                        "[EnableAllSMUFeatures] Failed to enable all smu features!",
                        return ret);
 
@@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
        if (data->smu_features[GNLD_DPM_UCLK].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       1);
+                       1,
+                       NULL);
 
        return 0;
 }
@@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetFclkGfxClkRatio,
-                       data->registry_data.fclk_gfxclk_ratio);
+                       data->registry_data.fclk_gfxclk_ratio,
+                       NULL);
 }
 
 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+                       PPSMC_MSG_DisableAllSmuFeatures,
+                       NULL)) == 0,
                        "[DisableAllSMUFeatures] Failed to disable all smu features!",
                        return ret);
 
@@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage(
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetAVFSVoltageByDpm,
-                       ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+                       ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+                       voltage);
        PP_ASSERT_WITH_CODE(!ret,
                        "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
                        return ret);
 
-       *voltage = smum_get_argument(hwmgr);
        *voltage = *voltage / VOLTAGE_SCALE;
 
        return 0;
@@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetDcModeMaxDpmFreq,
-                       (clock_select << 16))) == 0,
+                       (clock_select << 16),
+                       clock)) == 0,
                        "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
                        return ret);
-       *clock = smum_get_argument(hwmgr);
 
        /* if DC limit is zero, return AC limit */
        if (*clock == 0) {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_GetMaxDpmFreq,
-                       (clock_select << 16))) == 0,
+                       (clock_select << 16),
+                       clock)) == 0,
                        "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
                        return ret);
-               *clock = smum_get_argument(hwmgr);
        }
 
        return 0;
@@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
        int result;
 
        result = smum_send_msg_to_smc(hwmgr,
-               PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+               PPSMC_MSG_SetMGpuFanBoostLimitRpm,
+               NULL);
        PP_ASSERT_WITH_CODE(!result,
                        "[EnableMgpuFan] Failed to enable mgpu fan boost!",
                        return result);
@@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        int result = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        result = vega20_set_allowed_featuresmask(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
@@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        return result);
 
        result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
-                       POWER_SOURCE_AC << 16);
+                       POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
        PP_ASSERT_WITH_CODE(!result,
                        "[GetPptLimit] get default PPT limit failed!",
                        return result);
        hwmgr->power_limit =
-               hwmgr->default_power_limit = smum_get_argument(hwmgr);
+               hwmgr->default_power_limit;
 
        return 0;
 }
@@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min gfxclk !",
                                        return ret);
        }
@@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min memclk !",
                                        return ret);
        }
@@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min vclk!",
                                        return ret);
 
@@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min dclk!",
                                        return ret);
        }
@@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min eclk!",
                                        return ret);
        }
@@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min socclk!",
                                        return ret);
        }
@@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
-                                       (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_FCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft min fclk!",
                                        return ret);
        }
@@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetHardMinByFreq,
-                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+                                       (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set hard min dcefclk!",
                                        return ret);
        }
@@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max gfxclk!",
                                        return ret);
        }
@@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max memclk!",
                                        return ret);
        }
@@ -1922,14 +1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max vclk!",
                                        return ret);
 
                max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max dclk!",
                                        return ret);
        }
@@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max eclk!",
                                        return ret);
        }
@@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max socclk!",
                                        return ret);
        }
@@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
-                                       (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
+                                       (PPCLK_FCLK << 16) | (max_freq & 0xffff),
+                                       NULL)),
                                        "Failed to set soft max fclk!",
                                        return ret);
        }
@@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
 
        if (max) {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+                               PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
+                               clock)) == 0,
                                "[GetClockRanges] Failed to get max clock from SMC!",
                                return ret);
-               *clock = smum_get_argument(hwmgr);
        } else {
                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_GetMinDpmFreq,
-                               (clock_select << 16))) == 0,
+                               (clock_select << 16),
+                               clock)) == 0,
                                "[GetClockRanges] Failed to get min clock from SMC!",
                                return ret);
-               *clock = smum_get_argument(hwmgr);
        }
 
        return 0;
@@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
        *clk_freq = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+                       PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
+                       clk_freq)) == 0,
                        "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
                        return ret);
-       *clk_freq = smum_get_argument(hwmgr);
 
        *clk_freq = *clk_freq * 100;
 
@@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                        clk_request = (clk_select << 16) | clk_freq;
                        result = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinByFreq,
-                                       clk_request);
+                                       clk_request,
+                                       NULL);
                }
        }
 
@@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
                        if (data->smu_features[GNLD_DS_DCEFCLK].supported)
                                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
-                                       min_clocks.dcefClockInSR / 100)) == 0,
+                                       min_clocks.dcefClockInSR / 100,
+                                       NULL)) == 0,
                                        "Attempt to set divider for DCEFCLK Failed!",
                                        return ret);
                } else {
@@ -2324,7 +2337,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
                dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetHardMinFreq] Set hard min uclk failed!",
                                return ret);
        }
@@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                        return -EINVAL;
 
                ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+                       PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
+                       NULL);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to set min link dpm level!",
                        return ret);
@@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
                return 0;
        }
 
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
                            "[PrepareMp1] Failed!",
                            return ret);
 
@@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetHardMinByFreq,
-                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+                               NULL)),
                                "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
                                return ret);
        }
@@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetSoftMinByFreq,
-                               (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+                               (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+                               NULL)),
                                "[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
                                return ret);
        }
@@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_NumOfDisplays, 0);
+                       PPSMC_MSG_NumOfDisplays, 0, NULL);
 
        ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
                        &data->dpm_table.mem_table);
@@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
            data->smu_features[GNLD_DPM_SOCCLK].supported) {
                result = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_NumOfDisplays,
-                       hwmgr->display_config->num_display);
+                       hwmgr->display_config->num_display,
+                       NULL);
        }
 
        return result;
@@ -4082,7 +4100,8 @@ out:
        workload_type =
                conv_power_profile_to_pplib_workload(power_profile_mode);
        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
-                                               1 << workload_type);
+                                               1 << workload_type,
+                                               NULL);
 
        hwmgr->power_profile_mode = power_profile_mode;
 
@@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrHigh,
-                                       virtual_addr_hi);
+                                       virtual_addr_hi,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetSystemVirtualDramAddrLow,
-                                       virtual_addr_low);
+                                       virtual_addr_low,
+                                       NULL);
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrHigh,
-                                       mc_addr_hi);
+                                       mc_addr_hi,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramAddrLow,
-                                       mc_addr_low);
+                                       mc_addr_low,
+                                       NULL);
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DramLogSetDramSize,
-                                       size);
+                                       size,
+                                       NULL);
        return 0;
 }
 
@@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
                                                  (acquire ?
                                                  PPSMC_MSG_RequestI2CBus :
                                                  PPSMC_MSG_ReleaseI2CBus),
-                                                 0);
+                                                 0,
+                                                 NULL);
 
        PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
        return res;
@@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
                return -EINVAL;
        }
 
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
+                               NULL);
        if (ret)
                pr_err("SetDfCstate failed!\n");
 
@@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                                  PPSMC_MSG_SetXgmiMode,
-                                                 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+                                                 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+                                                 NULL);
        if (ret)
                pr_err("SetXgmiPstate failed!\n");
 
index a0bfb65cc5d6bb3659b3dc60f197936ece5ca03b..d7cc3d2d9e17d2425d90b0756008189dec63190b 100644 (file)
@@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 
        if (data->smu_features[GNLD_PPT].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetPptLimit, n);
+                               PPSMC_MSG_SetPptLimit, n,
+                               NULL);
 
        return 0;
 }
@@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
                uint32_t adjust_percent)
 {
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+                       PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+                       NULL);
 }
 
 int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
index ede54e87e287b5c838677e0739e60aa246aa7e65..7add2f60f49c4094ffac2603858e8a9d3b8712dd 100644 (file)
@@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
        int ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_GetCurrentRpm)) == 0,
+                               PPSMC_MSG_GetCurrentRpm,
+                               current_rpm)) == 0,
                        "Attempt to get current RPM from SMC Failed!",
                        return ret);
-       *current_rpm = smum_get_argument(hwmgr);
 
        return 0;
 }
@@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
        ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanTemperatureTarget,
-                               (uint32_t)table->FanTargetTemperature);
+                               (uint32_t)table->FanTargetTemperature,
+                               NULL);
 
        return ret;
 }
index ae2c318dd6fac4d8b6fb5de077d6f5d7b33778dd..4d1c2a44a8b6e927ed1f5757e01acd52fb8c6947 100644 (file)
@@ -405,7 +405,9 @@ struct smu_context
        bool pm_enabled;
        bool is_apu;
 
-       uint32_t smc_if_version;
+       uint32_t smc_driver_if_version;
+       uint32_t smc_fw_if_version;
+       uint32_t smc_fw_version;
 
        bool uploading_custom_pp_table;
        bool dc_controlled_by_gpio;
@@ -489,6 +491,7 @@ struct pptable_funcs {
        int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
                                   uint32_t dpm_level, uint32_t *freq);
        int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+       int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
        int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
        int (*i2c_eeprom_init)(struct i2c_adapter *control);
        void (*i2c_eeprom_fini)(struct i2c_adapter *control);
@@ -580,11 +583,6 @@ int smu_check_fw_status(struct smu_context *smu);
 
 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
 
-#define smu_i2c_eeprom_init(smu, control) \
-               ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
-#define smu_i2c_eeprom_fini(smu, control) \
-               ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
-
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
 
 int smu_get_power_limit(struct smu_context *smu,
@@ -734,6 +732,7 @@ int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state);
 int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks);
index f736d773f9d629ad8e0bc4833ec95f20f1bdd70d..e07478b6ac04da590b0949cc599e4bec91a02eaa 100644 (file)
 #define PPSMC_MSG_SetNumBadHbmPagesRetired      0x3A
 
 #define PPSMC_MSG_DFCstateControl               0x3B
-#define PPSMC_Message_Count                     0x3C
+#define PPSMC_MSG_GmiPwrDnControl                0x3D
+#define PPSMC_Message_Count                      0x3E
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
index 2ffb666b97e6d609f99a0029d23f4dc9c0aa0ff7..15ed6cbdf36604ac893f0d72a7579915bed27974 100644 (file)
@@ -743,6 +743,7 @@ struct pp_hwmgr {
        bool pm_en;
        bool pp_one_vf;
        struct mutex smu_lock;
+       struct mutex msg_lock;
 
        uint32_t pp_table_version;
        void *device;
index ce5b5011c122ac86f82c3d386bab98e5e9a11921..8b82059d97e77ffadbbbecc746e691840321112e 100644 (file)
@@ -82,8 +82,8 @@
 // Other
 #define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
 #define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+#define FEATURE_PER_PART_VMIN_BIT       26
 
-#define FEATURE_SPARE_26_BIT            26
 #define FEATURE_SPARE_27_BIT            27
 #define FEATURE_SPARE_28_BIT            28
 #define FEATURE_SPARE_29_BIT            29
 
 #define FEATURE_OUT_OF_BAND_MONITOR_MASK  (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT   )
 #define FEATURE_TEMP_DEPENDENT_VMIN_MASK  (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+#define FEATURE_PER_PART_VMIN_MASK        (1 << FEATURE_PER_PART_VMIN_BIT        )
 
 
 //FIXME need updating
@@ -628,8 +629,14 @@ typedef struct {
   uint16_t BasePerformanceFrequencyCap;   //In Mhz
   uint16_t MaxPerformanceFrequencyCap;    //In Mhz
 
+  // Per-Part Vmin
+  uint16_t VDDGFX_VminLow;        // mv Q2
+  uint16_t VDDGFX_TVminLow;       //Celcius
+  uint16_t VDDGFX_VminLow_HiTemp; // mv Q2
+  uint16_t VDDGFX_VminLow_LoTemp; // mv Q2
+
   // SECTION: Reserved
-  uint32_t     Reserved[9];
+  uint32_t     Reserved[7];
 
   // SECTION: BOARD PARAMETERS
 
@@ -869,6 +876,10 @@ typedef struct {
   uint8_t   Mem_DownHystLimit;
   uint16_t  Mem_Fps;
 
+  uint32_t  BusyThreshold;                  // Q16
+  uint32_t  BusyHyst;
+  uint32_t  IdleHyst;
+
   uint32_t  MmHubPadding[8]; // SMU internal use
 } DpmActivityMonitorCoeffInt_t;
 
index 2f85a34c0591ad87585f0bdd6105ba94eef9a740..e9315eb5b48e1116c46b5359cab43b17457bce74 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 11
+#define SMU12_DRIVER_IF_VERSION 14
 
 typedef struct {
   int32_t value;
@@ -154,15 +154,19 @@ typedef enum {
 } CLOCK_IDs_e;
 
 // Throttler Status Bitmask
-#define THROTTLER_STATUS_BIT_SPL        0
-#define THROTTLER_STATUS_BIT_FPPT       1
-#define THROTTLER_STATUS_BIT_SPPT       2
-#define THROTTLER_STATUS_BIT_SPPT_APU   3
-#define THROTTLER_STATUS_BIT_THM_CORE   4
-#define THROTTLER_STATUS_BIT_THM_GFX    5
-#define THROTTLER_STATUS_BIT_THM_SOC    6
-#define THROTTLER_STATUS_BIT_TDC_VDD    7
-#define THROTTLER_STATUS_BIT_TDC_SOC    8
+#define THROTTLER_STATUS_BIT_SPL            0
+#define THROTTLER_STATUS_BIT_FPPT           1
+#define THROTTLER_STATUS_BIT_SPPT           2
+#define THROTTLER_STATUS_BIT_SPPT_APU       3
+#define THROTTLER_STATUS_BIT_THM_CORE       4
+#define THROTTLER_STATUS_BIT_THM_GFX        5
+#define THROTTLER_STATUS_BIT_THM_SOC        6
+#define THROTTLER_STATUS_BIT_TDC_VDD        7
+#define THROTTLER_STATUS_BIT_TDC_SOC        8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU    9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX   10
+#define THROTTLER_STATUS_BIT_EDC_CPU       11
+#define THROTTLER_STATUS_BIT_EDC_GFX       12
 
 typedef struct {
   uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
@@ -180,7 +184,7 @@ typedef struct {
   uint16_t Power[2];                    //[mW] indices: VDDCR_VDD, VDDCR_SOC
 
   uint16_t FanPwm;                      //[milli]
-  uint16_t CurrentSocketPower;          //[mW]
+  uint16_t CurrentSocketPower;          //[W]
 
   uint16_t CoreFrequency[8];            //[MHz]
   uint16_t CorePower[8];                //[mW]
@@ -193,10 +197,16 @@ typedef struct {
   uint16_t ThrottlerStatus;
   uint16_t spare;
 
-  uint16_t StapmOriginalLimit;          //[mW]
-  uint16_t StapmCurrentLimit;           //[mW]
-  uint16_t ApuPower;              //[mW]
-  uint16_t dGpuPower;               //[mW]
+  uint16_t StapmOriginalLimit;          //[W]
+  uint16_t StapmCurrentLimit;           //[W]
+  uint16_t ApuPower;                    //[W]
+  uint16_t dGpuPower;                   //[W]
+
+  uint16_t VddTdcValue;                 //[mA]
+  uint16_t SocTdcValue;                 //[mA]
+  uint16_t VddEdcValue;                 //[mA]
+  uint16_t SocEdcValue;                 //[mA]
+  uint16_t reserve[2];
 } SmuMetrics_t;
 
 
index a5b4df1467130f47432ee6e3a15f503b3f9d2fc7..ee7dac4693d4144c645010d44db1f5457ad17586 100644 (file)
        __SMU_DUMMY_MAP(SetSoftMinJpeg),              \
        __SMU_DUMMY_MAP(SetHardMinFclkByFreq),        \
        __SMU_DUMMY_MAP(DFCstateControl), \
+       __SMU_DUMMY_MAP(GmiPwrDnControl), \
        __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
        __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
 
index 674e426ed59bbe1d056b2371fbf5777492604f27..6b3b451a80188a588a0912e87be6e60d60b209c1 100644 (file)
@@ -27,8 +27,8 @@
 
 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x14
+#define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
 
@@ -37,7 +37,6 @@
 #define MP0_SRAM                       0x03900000
 #define MP1_Public                     0x03b00000
 #define MP1_SRAM                       0x03c00004
-#define MP1_SMC_SIZE           0x40000
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS          0x3010024
index c5288831aa15c29b244203b5604ee734fc5e6a0e..ad100b533d0496c8f4f4c5be28c3ced01692208b 100644 (file)
@@ -81,16 +81,15 @@ enum SMU10_TABLE_ID {
        SMU10_CLOCKTABLE,
 };
 
-extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
-
 extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
 
 extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
 
-extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp);
 
 extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                       uint16_t msg, uint32_t parameter);
+                                       uint16_t msg, uint32_t parameter,
+                                       uint32_t *resp);
 
 extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
 
index 15030284b4446f4d75d67f4d17f24d1f246d7e09..0c9be864d072e7e06fb5e5e2d0d05ee56beaaacf 100644 (file)
@@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
        struct smu_table_context *table_context = &smu->smu_table;
        PPTable_t *smc_pptable = table_context->driver_pptable;
        struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
+       struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
        int index, ret;
 
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
        if (ret)
                return ret;
 
-       memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
-              sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
-
-       /* SVI2 Board Parameters */
-       smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
-       smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
-       smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
-       smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
-       smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
-       smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
-       smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
-       smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
-       smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
-       smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
-
-       /* Telemetry Settings */
-       smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
-       smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
-       smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
-       smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
-       smc_pptable->SocOffset = smc_dpm_table->SocOffset;
-       smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
-       smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
-       smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
-       smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
-       smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
-       smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
-       smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
-
-       /* GPIO Settings */
-       smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
-       smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
-       smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
-       smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
-       smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
-       smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
-       smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
-       smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
-
-       /* LED Display Settings */
-       smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
-       smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
-       smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
-       smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
-
-       /* GFXCLK PLL Spread Spectrum */
-       smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
-       smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
-       smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
-
-       /* GFXCLK DFLL Spread Spectrum */
-       smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
-       smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
-       smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
-
-       /* UCLK Spread Spectrum */
-       smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
-       smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
-       smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
-
-       /* SOCCLK Spread Spectrum */
-       smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
-       smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
-       smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
-
-       /* Total board power */
-       smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
-       smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
-
-       /* Mvdd Svi2 Div Ratio Setting */
-       smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
+       pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+                       smc_dpm_table->table_header.format_revision,
+                       smc_dpm_table->table_header.content_revision);
+
+       if (smc_dpm_table->table_header.format_revision != 4) {
+               pr_err("smc_dpm_info table format revision is not 4!\n");
+               return -EINVAL;
+       }
+
+       switch (smc_dpm_table->table_header.content_revision) {
+       case 5: /* nv10 and nv14 */
+               memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
+                       sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+               break;
+       case 7: /* nv12 */
+               ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+                                             (uint8_t **)&smc_dpm_table_v4_7);
+               if (ret)
+                       return ret;
+               memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
+                       sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+               break;
+       default:
+               pr_err("smc_dpm_info with unsupported content revision %d!\n",
+                               smc_dpm_table->table_header.content_revision);
+               return -EINVAL;
+       }
 
        if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
                /* TODO: remove it once SMU fw fix it */
@@ -1336,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
-               if (size < 0)
-                       return -EINVAL;
 
                ret = smu_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1860,7 +1815,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
        int power_src;
 
        if (!smu->power_limit) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+               if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+                       !amdgpu_sriov_vf(smu->adev)) {
                        power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
                        if (power_src < 0)
                                return -EINVAL;
@@ -2003,6 +1959,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
        OverDriveTable_t *od_table, *boot_od_table;
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
        if (ret)
                return ret;
index b0ed1b3fe79a351a82ab984aac7ccee57aff89fd..67476047c067d1d0f99b7ecba83ca2a00ed90dca 100644 (file)
@@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 
        for (i = 0; i < count; i++) {
                GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+               if (!value)
+                       continue;
                size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
                                cur_value == value ? "*" : "");
                if (cur_value == value)
@@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
        uint32_t i, size = 0;
        int16_t workload_type = 0;
 
-       if (!smu->pm_enabled || !buf)
+       if (!buf)
                return -EINVAL;
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -898,7 +900,7 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
 
        /*
-        * Util now, the pmfw hasn't exported the interface of SMU
+        * Until now, the pmfw hasn't exported the interface of SMU
         * feature mask to APU SKU so just force on all the feature
         * at early initial stage.
         */
@@ -955,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 void renoir_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &renoir_ppt_funcs;
-       smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+       smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
        smu->is_apu = true;
 }
index 40c35bcc5a0a2c9a358e6773375e156c7a5bc01c..c97444841abcb40e6dd544fe87affc383391e2b0 100644 (file)
@@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
 #define smu_set_power_source(smu, power_src) \
        ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
 
+#define smu_i2c_eeprom_init(smu, control) \
+               ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0)
+#define smu_i2c_eeprom_fini(smu, control) \
+               ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0)
+
 #endif
index 655ba4fb05dcdf330e53c3548e54681a18a58208..ae0361e225bb27eb9ff813636f89e9021aff7d8d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/reboot.h>
 
 #define SMU_11_0_PARTIAL_PPTABLE
 
@@ -57,7 +58,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
 {
        struct amdgpu_device *adev = smu->adev;
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
        return 0;
 }
 
@@ -65,7 +66,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+       *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
        return 0;
 }
 
@@ -75,7 +76,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
 
        for (i = 0; i < timeout; i++) {
-               cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+               cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;
 
@@ -83,7 +84,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        }
 
        /* timeout means wrong logic */
-       return -ETIME;
+       if (i == timeout)
+               return -ETIME;
+
+       return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
 }
 
 int
@@ -107,9 +111,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                goto out;
        }
 
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
 
        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 
@@ -119,6 +123,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                       smu_get_message_name(smu, msg), index, param, ret);
                goto out;
        }
+
        if (read_arg) {
                ret = smu_v11_0_read_arg(smu, read_arg);
                if (ret) {
@@ -201,13 +206,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu)
        const struct smc_firmware_header_v1_0 *hdr;
        uint32_t addr_start = MP1_SRAM;
        uint32_t i;
+       uint32_t smc_fw_size;
        uint32_t mp1_fw_flags;
 
        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        src = (const uint32_t *)(adev->pm.fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       smc_fw_size = hdr->header.ucode_size_bytes;
 
-       for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+       for (i = 1; i < smc_fw_size/4 - 1; i++) {
                WREG32_PCIE(addr_start, src[i]);
                addr_start += 4;
        }
@@ -264,23 +271,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
 
        switch (smu->adev->asic_type) {
        case CHIP_VEGA20:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
                break;
        case CHIP_ARCTURUS:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
                break;
        case CHIP_NAVI10:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
                break;
        case CHIP_NAVI12:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
                break;
        case CHIP_NAVI14:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
                break;
        default:
                pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
                break;
        }
 
@@ -292,10 +299,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
         * Considering above, we just leave user a warning message instead
         * of halt driver loading.
         */
-       if (if_version != smu->smc_if_version) {
+       if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                       smu->smc_if_version, if_version,
+                       smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }
@@ -479,8 +486,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;
 
@@ -497,8 +502,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;
 
@@ -730,8 +733,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
        struct smu_table_context *table_context = &smu->smu_table;
        struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
 
+       /* during TDR we need to free and alloc the pptable */
        if (table_context->driver_pptable)
-               return -EINVAL;
+               kfree(table_context->driver_pptable);
 
        table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
 
@@ -771,6 +775,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 {
        int ret;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
        if (ret)
@@ -783,8 +790,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 {
        struct smu_table_context *table_context = &smu->smu_table;
 
-       if (!smu->pm_enabled)
-               return 0;
        if (!table_context)
                return -EINVAL;
 
@@ -816,6 +821,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
        int ret = 0;
        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (tool_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrHigh,
@@ -835,6 +843,9 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 {
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (!smu->pm_enabled)
                return ret;
 
@@ -849,6 +860,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        mutex_lock(&feature->mutex);
        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
                goto failed;
@@ -877,6 +891,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+               return 0;
+
        if (!feature_mask || num < 2)
                return -EINVAL;
 
@@ -932,8 +949,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 {
        int ret = 0;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (!smu->pm_enabled)
                return ret;
+
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -948,9 +969,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
        int ret = 0;
        int clk_id;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
            (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
                return 0;
@@ -1096,6 +1114,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
        int ret = 0;
        uint32_t max_power_limit;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        max_power_limit = smu_v11_0_get_max_power_limit(smu);
 
        if (n > max_power_limit) {
@@ -1205,9 +1226,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
        struct smu_temperature_range range;
        struct amdgpu_device *adev = smu->adev;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
 
        ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1321,9 +1339,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
        enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
 
-       if (!smu->pm_enabled)
-               return -EINVAL;
-
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
                smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
@@ -1533,39 +1548,59 @@ static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
 #define THM_11_0__SRCID__THM_DIG_THERM_L2H             0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
 #define THM_11_0__SRCID__THM_DIG_THERM_H2L             1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
 
+#define SMUIO_11_0__SRCID__SMUIO_GPIO19                        83
+
 static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
        uint32_t client_id = entry->client_id;
        uint32_t src_id = entry->src_id;
+       /*
+        * ctxid is used to distinguish different
+        * events for SMCToHost interrupt.
+        */
+       uint32_t ctxid = entry->src_data[0];
 
        if (client_id == SOC15_IH_CLIENTID_THM) {
                switch (src_id) {
                case THM_11_0__SRCID__THM_DIG_THERM_L2H:
-                       pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
-                               PCI_BUS_NUM(adev->pdev->devfn),
-                               PCI_SLOT(adev->pdev->devfn),
-                               PCI_FUNC(adev->pdev->devfn));
+                       dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+                       /*
+                        * SW CTF just occurred.
+                        * Try to do a graceful shutdown to prevent further damage.
+                        */
+                       dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+                       orderly_poweroff(true);
                break;
                case THM_11_0__SRCID__THM_DIG_THERM_H2L:
-                       pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
-                               PCI_BUS_NUM(adev->pdev->devfn),
-                               PCI_SLOT(adev->pdev->devfn),
-                               PCI_FUNC(adev->pdev->devfn));
+                       dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
                break;
                default:
-                       pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
-                               src_id,
-                               PCI_BUS_NUM(adev->pdev->devfn),
-                               PCI_SLOT(adev->pdev->devfn),
-                               PCI_FUNC(adev->pdev->devfn));
+                       dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+                               src_id);
                break;
-
                }
+       } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+               dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+               /*
+                * HW CTF just occurred. Shutdown to prevent further damage.
+                */
+               dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+               orderly_poweroff(true);
        } else if (client_id == SOC15_IH_CLIENTID_MP1) {
-               if (src_id == 0xfe)
-                       smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+               if (src_id == 0xfe) {
+                       switch (ctxid) {
+                       case 0x3:
+                               dev_dbg(adev->dev, "Switched to AC mode!\n");
+                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               break;
+                       case 0x4:
+                               dev_dbg(adev->dev, "Switched to DC mode!\n");
+                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               break;
+                       }
+               }
        }
 
        return 0;
@@ -1605,6 +1640,13 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
        if (ret)
                return ret;
 
+       /* Register CTF(GPIO_19) interrupt */
+       ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
+                               SMUIO_11_0__SRCID__SMUIO_GPIO19,
+                               irq_src);
+       if (ret)
+               return ret;
+
        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
                                0xfe,
                                irq_src);
@@ -1833,6 +1875,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret;
 
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
index 169ebdad87b87be62a8d58d754bee8a29565b7ee..4023d10fb49bb50ec80532c3018152c27b9a0db5 100644 (file)
 
 #include "asic_reg/mp/mp_12_0_0_offset.h"
 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+#include "asic_reg/smuio/smuio_12_0_0_offset.h"
+#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
 
-#define smnMP1_FIRMWARE_FLAGS                                0x3010024
+// because some SMU12 based ASICs use older ip offset tables
+// we should undefine this register from the smuio12 header
+// to prevent confusion down the road
+#undef mmPWR_MISC_CNTL_STATUS
 
-#define mmSMUIO_GFX_MISC_CNTL                                0x00c8
-#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1
+#define smnMP1_FIRMWARE_FLAGS                                0x3010024
 
 int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
@@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
         * Considering above, we just leave user a warning message instead
         * of halt driver loading.
         */
-       if (if_version != smu->smc_if_version) {
+       if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                       smu->smc_if_version, if_version,
+                       smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }
index 868e2d5f6e621e1c04980314de23b6f9180e2353..85e5b1ed22c2993e356b8cd5022cc999806619c3 100644 (file)
@@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
                        break;
        }
-       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.uvd_dpm_enable_mask,
+                               NULL);
 
        return 0;
 }
@@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
                if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
                        break;
        }
-       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               data->dpm_level_enable_mask.vce_dpm_enable_mask);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.vce_dpm_enable_mask,
+                               NULL);
 
        return 0;
 }
index 32ebb383c4568f623599154761e7356229d12987..ecb9ee46d6b35b8c57bdf705580f197024492fc1 100644 (file)
@@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
        PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
                        INTERRUPTS_ENABLED, 1);
 
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
 
        /* Wait for done bit to be set */
        PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
@@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
        struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
        if (0 != smu_data->avfs_btc_param) {
-               if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+                               NULL)) {
                        pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
                        result = -EINVAL;
                }
@@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
        if (mask)
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                    PPSMC_MSG_LedConfig,
-                                                   mask);
+                                                   mask,
+                                                   NULL);
        return 0;
 }
 
@@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanMinPwm,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
+                               advanceFanControlParameters.ucMinimumPWMLimit,
+                               NULL);
 
        if (!res && hwmgr->thermal_controller.
                        advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanSclkTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+                               NULL);
 
        if (res)
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
        if (!hwmgr->avfs_supported)
                return 0;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
 
        return 0;
 }
@@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = fiji_update_smc_table,
index 732005c03a82d89f7528ee067ea93c3f259d7195..431ad2fd38df19b2cc2e54e4000acd8df65d4670 100644 (file)
@@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
        .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .get_offsetof = iceland_get_offsetof,
index 23c12018dbc18c582a99610d8575d6555861c7f7..c3d2e6dcf62a09387e3f86f60234a3bea75747a4 100644 (file)
@@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
        struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
        if (0 != smu_data->avfs_btc_param) {
-               if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+               if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+                                       NULL)) {
                        pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
                        result = -1;
                }
@@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
                return 0;
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting,
+                       NULL);
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
 
        /* Apply avfs cks-off voltages to avoid the overshoot
         * when switching to the highest sclk frequency
         */
        if (data->apply_avfs_cks_off_voltage)
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL);
 
        return 0;
 }
@@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanMinPwm,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
+                               advanceFanControlParameters.ucMinimumPWMLimit,
+                               NULL);
 
        if (!res && hwmgr->thermal_controller.
                        advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
                res = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetFanSclkTarget,
                                hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+                               NULL);
 
        if (res)
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = polaris10_update_smc_table,
index 2319400a3fcb60d046323d422e7c9484ccfeaa32..ea2279bb8cbfd24a77799e9ee5acb0896b347ef8 100644 (file)
@@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL;);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL;);
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        /* flush hdp cache */
        amdgpu_asic_flush_hdp(adev, NULL);
@@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu10_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        return 0;
 }
@@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
 {
        uint32_t smc_driver_if_version;
 
-       smu10_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetDriverIfVersion);
-       smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetDriverIfVersion,
+                       &smc_driver_if_version);
 
        if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
            (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
@@ -217,11 +223,11 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
-       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-       hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
        adev->pm.fw_version = hwmgr->smu_version >> 8;
 
-       if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+       if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+           (adev->apu_flags & AMD_APU_IS_RAVEN) &&
            adev->pm.fw_version < 0x1e45)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
index 3f51d545e8ff3931dcfe57c0f859c464dcdf8adb..aae25243eb10d33a5ea84903ea790e2daa0213d3 100644 (file)
@@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        return 0;
 }
 
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
-       return 0;
-}
-
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui
        return smu7_send_msg_to_smc(hwmgr, msg);
 }
 
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
 {
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-
-       return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
+       return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 }
 
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
 {
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
-       if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
-               pr_info("Failed to send Message.\n");
-
-       return 0;
+       return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
 }
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
@@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 
        if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
                if (hwmgr->not_vf) {
-                       smu7_send_msg_to_smc_with_parameter(hwmgr,
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SMU_DRAM_ADDR_HI,
-                                               upper_32_bits(smu_data->smu_buffer.mc_addr));
-                       smu7_send_msg_to_smc_with_parameter(hwmgr,
+                                               upper_32_bits(smu_data->smu_buffer.mc_addr),
+                                               NULL);
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SMU_DRAM_ADDR_LO,
-                                               lower_32_bits(smu_data->smu_buffer.mc_addr));
+                                               lower_32_bits(smu_data->smu_buffer.mc_addr),
+                                               NULL);
                }
                fw_to_load = UCODE_ID_RLC_G_MASK
                           + UCODE_ID_SDMA0_MASK
@@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        }
        memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
                    sizeof(struct SMU_DRAMData_TOC));
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
-
-       smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_DRV_DRAM_ADDR_HI,
+                       upper_32_bits(smu_data->header_buffer.mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_DRV_DRAM_ADDR_LO,
+                       lower_32_bits(smu_data->header_buffer.mc_addr),
+                       NULL);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);
 
        r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
        if (!r)
index 01f0538fba6b9e8edc20febbda4178dd4aa545d4..e7303dc8c260b197146fca4d29bfc443c6f0d5e4 100644 (file)
@@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
 int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
 bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
 int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
                                                uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
-                                               uint16_t msg, uint32_t parameter);
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr);
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
index 7dca04a8921730a3eb046275253f9464322f7bbe..76d4f12ceedf3babe753f7f5cab5f507ba684ef6 100644 (file)
@@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
 
        *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrHi,
-                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrLo,
-                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_clock_table);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_clock_table,
+                               NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
 
        return 0;
 }
@@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
                        break;
        }
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrHi,
-                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetClkTableAddrLo,
-                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+                               lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_clock_table);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_clock_table,
+                               NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
 
        return 0;
 }
@@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 
        smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DriverDramAddrHi,
-                                       upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+                                       upper_32_bits(smu8_smu->toc_buffer.mc_addr),
+                                       NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_DriverDramAddrLo,
-                                       lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+                                       lower_32_bits(smu8_smu->toc_buffer.mc_addr),
+                                       NULL);
 
-       smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_ExecuteJob,
-                                       smu8_smu->toc_entry_aram);
-       smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-                               smu8_smu->toc_entry_power_profiling_index);
+                                       smu8_smu->toc_entry_aram,
+                                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+                               smu8_smu->toc_entry_power_profiling_index,
+                               NULL);
 
-       smu8_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_ExecuteJob,
-                                       smu8_smu->toc_entry_initialize_index);
+                                       smu8_smu->toc_entry_initialize_index,
+                                       NULL);
 
        fw_to_check = UCODE_ID_RLC_G_MASK |
                        UCODE_ID_SDMA0_MASK |
@@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
                                unsigned long check_feature)
 {
        int result;
-       unsigned long features;
+       uint32_t features;
 
-       result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+       result = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_GetFeatureStatus,
+                               0,
+                               &features);
        if (result == 0) {
-               features = smum_get_argument(hwmgr);
                if (features & check_feature)
                        return true;
        }
index 4240aeec9000e9f0e9677d741dfa4d0f180fa196..b6fb480668416a92892f5e6d85122d0a5ac2eb8b 100644 (file)
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
-{
-       if (NULL != hwmgr->smumgr_funcs->get_argument)
-               return hwmgr->smumgr_funcs->get_argument(hwmgr);
-
-       return 0;
-}
-
 uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
 {
        if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
@@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
 {
-       if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
+       int ret = 0;
+
+       if (hwmgr == NULL ||
+           hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+           (resp && !hwmgr->smumgr_funcs->get_argument))
                return -EINVAL;
 
-       return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+       mutex_lock(&hwmgr->msg_lock);
+
+       ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+       if (ret) {
+               mutex_unlock(&hwmgr->msg_lock);
+               return ret;
+       }
+
+       if (resp)
+               *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+       mutex_unlock(&hwmgr->msg_lock);
+
+       return ret;
 }
 
 int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                       uint16_t msg, uint32_t parameter)
+                                       uint16_t msg,
+                                       uint32_t parameter,
+                                       uint32_t *resp)
 {
+       int ret = 0;
+
        if (hwmgr == NULL ||
-               hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+           hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+           (resp && !hwmgr->smumgr_funcs->get_argument))
                return -EINVAL;
-       return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+
+       mutex_lock(&hwmgr->msg_lock);
+
+       ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
                                                hwmgr, msg, parameter);
+       if (ret) {
+               mutex_unlock(&hwmgr->msg_lock);
+               return ret;
+       }
+
+       if (resp)
+               *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+       mutex_unlock(&hwmgr->msg_lock);
+
+       return ret;
 }
 
 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
index f19bac7ef7ba6cfa15dc906a65c1124c605d91f2..398e7e3587de141d455ba95dafc2879754580754 100644 (file)
@@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
                                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
 
        if (setting->bupdate_sclk) {
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
                        if (levels[i].ActivityLevel !=
                                cpu_to_be16(setting->sclk_activity)) {
@@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->sclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
        }
 
        if (setting->bupdate_mclk) {
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
                for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
                        if (mclk_levels[i].ActivityLevel !=
                                cpu_to_be16(setting->mclk_activity)) {
@@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
                        }
                }
                if (!data->mclk_dpm_key_disabled)
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
        }
        return 0;
 }
@@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = &smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .update_smc_table = tonga_update_smc_table,
index 715564009089615e6ce9b0dbc86ec2fe78307a9f..1e222c5d91a455e2b7b974ae54dffd49a7c2949e 100644 (file)
@@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        /* flush hdp cache */
        amdgpu_asic_flush_hdp(adev, NULL);
@@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       priv->smu_tables.entry[table_id].table_id);
+                       priv->smu_tables.entry[table_id].table_id,
+                       NULL);
 
        return 0;
 }
@@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
                return 0;
 
        return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       msg, feature_mask);
+                       msg, feature_mask, NULL);
 }
 
 int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
                            uint64_t *features_enabled)
 {
+       uint32_t enabled_features;
+
        if (features_enabled == NULL)
                return -EINVAL;
 
-       smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
-       *features_enabled = smu9_get_argument(hwmgr);
+       smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeatures,
+                       &enabled_features);
+       *features_enabled = enabled_features;
 
        return 0;
 }
@@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
        struct vega10_smumgr *priv = hwmgr->smu_backend;
 
        if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
-               smu9_send_msg_to_smc_with_parameter(hwmgr,
+               smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
-               smu9_send_msg_to_smc_with_parameter(hwmgr,
+                               upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+                               NULL);
+               smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrLow,
-                               lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+                               lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+                               NULL);
        }
        return 0;
 }
@@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
        uint32_t dev_id;
        uint32_t rev_id;
 
-       PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetDriverIfVersion),
+       PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetDriverIfVersion,
+                       &smc_driver_if_version),
                        "Attempt to get SMC IF Version Number Failed!",
                        return -EINVAL);
-       smc_driver_if_version = smu9_get_argument(hwmgr);
 
        dev_id = adev->pdev->device;
        rev_id = adev->pdev->revision;
index 275dbf65f1a0c5428e21096948806448db7fbf5e..f54df76537e4b197b246234ba3b00af4a38f069a 100644 (file)
@@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       table_id) == 0,
+                       table_id,
+                       NULL) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
                        return -EINVAL);
 
@@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return -EINVAL;);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
-                       table_id) == 0,
+                       table_id,
+                       NULL) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
                        return -EINVAL);
 
@@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
        if (enable) {
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
                                return -EINVAL);
        } else {
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+               PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
                                return -EINVAL);
        }
@@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesLow,
+                       &smc_features_low) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
                        return -EINVAL);
-       smc_features_low = smu9_get_argument(hwmgr);
 
-       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+                       &smc_features_high) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
                        return -EINVAL);
-       smc_features_high = smu9_get_argument(hwmgr);
 
        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
                        (struct vega12_smumgr *)(hwmgr->smu_backend);
 
        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-               if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+               if (!smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
-                       smu9_send_msg_to_smc_with_parameter(hwmgr,
+                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                               NULL))
+                       smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
-                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                                       NULL);
        }
        return 0;
 }
index 16aa171971d3aaa6ad81af88d27f53c385a3fdb7..2fb97554134f5aeaf8592e888cafff9cca236458 100644 (file)
@@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
                        return ret);
 
@@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+                       NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
                        return ret);
 
@@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 
        amdgpu_asic_flush_hdp(adev, NULL);
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_TransferTableDram2Smu,
+                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
+                       NULL)) == 0,
                        "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
                        return ret);
 
@@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
        struct amdgpu_device *adev = hwmgr->adev;
        int ret = 0;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+                       NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
-                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+                       TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
                        "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
                        return ret);
 
@@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
        if (enable) {
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
                                return ret);
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
                                return ret);
        } else {
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
                                return ret);
-               PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
+               PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
                                return ret);
        }
@@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesLow,
+                       &smc_features_low)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
                        return ret);
-       smc_features_low = vega20_get_argument(hwmgr);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+                       PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+                       &smc_features_high)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
                        return ret);
-       smc_features_high = vega20_get_argument(hwmgr);
 
        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
        int ret = 0;
 
        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-               ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
-                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                               upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                               NULL);
                if (!ret)
-                       ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
-                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+                                       lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+                                       NULL);
        }
 
        return ret;
@@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;
 
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+                       upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+                       NULL)) == 0,
                        "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
                        return ret);
-       PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+                       lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+                       NULL)) == 0,
                        "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
                        return ret);
 
index b0e0d67cd54b3e72987f37604326e6f0f48e9085..3da71a088b925f0d93a37661366b3c0b5fbaadfb 100644 (file)
@@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
                        PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+                               NULL);
        return 0;
 }
 
@@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+                               NULL);
        return 0;
 }
 
@@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc_with_parameter(hwmgr,
                                            PPSMC_MSG_EnableModeSwitchRLCNotification,
-                                           adev->gfx.cu_info.number);
+                                           adev->gfx.cu_info.number,
+                                           NULL);
 
        return 0;
 }
@@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
                table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_AutomaticDCTransition) &&
-                               !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+                               !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL))
                        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
        } else {
@@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
        if (!hwmgr->avfs_supported)
                return 0;
 
-       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
        if (!ret) {
                if (data->apply_avfs_cks_off_voltage)
-                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+                       ret = smum_send_msg_to_smc(hwmgr,
+                                       PPSMC_MSG_ApplyAvfsCksOffVoltage,
+                                       NULL);
        }
 
        return ret;
@@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = {
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = smu7_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+       .get_argument = smu7_get_argument,
        .process_firmware_header = vegam_process_firmware_header,
        .is_dpm_running = vegam_is_dpm_running,
        .get_mac_definition = vegam_get_mac_definition,
index 3f1044326dcb6b59d2b66e4f48864715adc2a3d5..61923530b2e4e1439b4c423fabef4064371ab769 100644 (file)
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
                        "PD_Data_error_rate_coeff"};
        int result = 0;
 
-       if (!smu->pm_enabled || !buf)
+       if (!buf)
                return -EINVAL;
 
        size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
 
        smu->power_profile_mode = input[size];
 
-       if (!smu->pm_enabled)
-               return ret;
        if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
                pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
                return -EINVAL;
index d6a6692db0acbdb645b56e910074b7dce852bcc4..c05d001163e0e8e6b63fc091318b5be632afc9a7 100644 (file)
@@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = {
        { "clocks", arcpgu_show_pxlclock, 0 },
 };
 
-static int arcpgu_debugfs_init(struct drm_minor *minor)
+static void arcpgu_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(arcpgu_debugfs_list,
-               ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+       drm_debugfs_create_files(arcpgu_debugfs_list,
+                                ARRAY_SIZE(arcpgu_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 442d4656150ae664b8e1ebc3a511beec83505126..6b85d5f4caa854fd4b3484f562fdb67928ad0222 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -260,17 +261,16 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
 
 struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 {
-       struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+       struct komeda_kms_dev *kms;
        struct drm_device *drm;
        int err;
 
-       if (!kms)
-               return ERR_PTR(-ENOMEM);
+       kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
+                                struct komeda_kms_dev, base);
+       if (IS_ERR(kms))
+               return kms;
 
        drm = &kms->base;
-       err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
-       if (err)
-               goto free_kms;
 
        drm->dev_private = mdev;
 
@@ -327,9 +327,6 @@ cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
-       drm_dev_put(drm);
-free_kms:
-       kfree(kms);
        return ERR_PTR(err);
 }
 
@@ -346,5 +343,4 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
-       drm_dev_put(drm);
 }
index 2e053815b54aa890f677ffda5332c2555b47db65..194419f47c5e5d71e7f61d9f9b99b963c95637a9 100644 (file)
@@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = {
        { "clocks", hdlcd_show_pxlclock, 0 },
 };
 
-static int hdlcd_debugfs_init(struct drm_minor *minor)
+static void hdlcd_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(hdlcd_debugfs_list,
-               ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+       drm_debugfs_create_files(hdlcd_debugfs_list,
+                                ARRAY_SIZE(hdlcd_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 37d92a06318e4d4f8e29540d90dbcec49c3afafb..def8c9ffafcaf5ad4f95438af5178ba0b96e712c 100644 (file)
@@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = {
        .release = single_release,
 };
 
-static int malidp_debugfs_init(struct drm_minor *minor)
+static void malidp_debugfs_init(struct drm_minor *minor)
 {
        struct malidp_drm *malidp = minor->dev->dev_private;
 
@@ -557,7 +557,6 @@ static int malidp_debugfs_init(struct drm_minor *minor)
        spin_lock_init(&malidp->errors_lock);
        debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
                            minor->dev, &malidp_debugfs_fops);
-       return 0;
 }
 
 #endif //CONFIG_DEBUG_FS
index 5232f81c16a57579e672e15894f98c2274428efb..5fc25c3f445c22be12ac4a53df43248584366d6f 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev)
                kfree(priv);
                return ret;
        }
+       drmm_add_final_kfree(&priv->drm, priv);
 
        /* Remove early framebuffers */
        ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
index a10358bb61ec4578cd84f9c526f7196a5a3e866d..e7ca95827ae89658870223337887adda545b51e9 100644 (file)
@@ -5,6 +5,7 @@
 #include <drm/drm_simple_kms_helper.h>
 
 struct aspeed_gfx {
+       struct drm_device               drm;
        void __iomem                    *base;
        struct clk                      *clk;
        struct reset_control            *rst;
@@ -12,8 +13,8 @@ struct aspeed_gfx {
 
        struct drm_simple_display_pipe  pipe;
        struct drm_connector            connector;
-       struct drm_fbdev_cma            *fbdev;
 };
+#define to_aspeed_gfx(x) container_of(x, struct aspeed_gfx, drm)
 
 int aspeed_gfx_create_pipe(struct drm_device *drm);
 int aspeed_gfx_create_output(struct drm_device *drm);
index 2184b8be6fd4f1db8be6f451825787a2e81a25a8..e54686c31a90decf023d70f812a112302495bfc4 100644 (file)
@@ -231,7 +231,7 @@ static const uint32_t aspeed_gfx_formats[] = {
 
 int aspeed_gfx_create_pipe(struct drm_device *drm)
 {
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
 
        return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
                                            aspeed_gfx_formats,
index ada2f6aca906c9a72ab9b68d8770935933068933..6b27242b9ee3c1a55b856bb58d456c5307e534a3 100644 (file)
@@ -77,7 +77,7 @@ static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
 static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
 {
        struct drm_device *drm = data;
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        u32 reg;
 
        reg = readl(priv->base + CRT_CTRL1);
@@ -96,15 +96,10 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
 static int aspeed_gfx_load(struct drm_device *drm)
 {
        struct platform_device *pdev = to_platform_device(drm->dev);
-       struct aspeed_gfx *priv;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        struct resource *res;
        int ret;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-       drm->dev_private = priv;
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(drm->dev, res);
        if (IS_ERR(priv->base))
@@ -187,8 +182,6 @@ static void aspeed_gfx_unload(struct drm_device *drm)
 {
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
-
-       drm->dev_private = NULL;
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(fops);
@@ -216,27 +209,26 @@ static const struct of_device_id aspeed_gfx_match[] = {
 
 static int aspeed_gfx_probe(struct platform_device *pdev)
 {
-       struct drm_device *drm;
+       struct aspeed_gfx *priv;
        int ret;
 
-       drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
-       if (IS_ERR(drm))
-               return PTR_ERR(drm);
+       priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
+                                 struct aspeed_gfx, drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
-       ret = aspeed_gfx_load(drm);
+       ret = aspeed_gfx_load(&priv->drm);
        if (ret)
-               goto err_free;
+               return ret;
 
-       ret = drm_dev_register(drm, 0);
+       ret = drm_dev_register(&priv->drm, 0);
        if (ret)
                goto err_unload;
 
        return 0;
 
 err_unload:
-       aspeed_gfx_unload(drm);
-err_free:
-       drm_dev_put(drm);
+       aspeed_gfx_unload(&priv->drm);
 
        return ret;
 }
@@ -247,7 +239,6 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
 
        drm_dev_unregister(drm);
        aspeed_gfx_unload(drm);
-       drm_dev_put(drm);
 
        return 0;
 }
index 67ee5fa10055b02c1984483b2a6eb5fcb88067dc..6759cb88415a4ca3e944ed64869c2e7963ef1833 100644 (file)
@@ -28,7 +28,7 @@ static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
 
 int aspeed_gfx_create_output(struct drm_device *drm)
 {
-       struct aspeed_gfx *priv = drm->dev_private;
+       struct aspeed_gfx *priv = to_aspeed_gfx(drm);
        int ret;
 
        priv->connector.dpms = DRM_MODE_DPMS_OFF;
index 30aa73a5d9b723f3400e330c7e681487d67bd9aa..b7ba22dddcad98c8c3056c978b5d239a7b3ce9b2 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_probe_helper.h>
 
@@ -111,6 +112,8 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_ast_driver_unload;
 
+       drm_fbdev_generic_setup(dev, 32);
+
        return 0;
 
 err_ast_driver_unload:
index 18a0a4ce00f6e0b823fd24f080085e747049d73e..e5398e3dabe708fe8f778e9b574da4a584d47ed3 100644 (file)
@@ -30,7 +30,6 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_vram_helper.h>
@@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_reset(dev);
 
-       ret = drm_fbdev_generic_setup(dev, 32);
-       if (ret)
-               goto out_free;
-
        return 0;
 out_free:
        kfree(ast);
index cdd6c46d6557d529d473cee8540f29e2d4cae603..7d39b858c9f1fbbfe68b6d2f0ad353f46b9ed99a 100644 (file)
@@ -561,8 +561,9 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
        return 0;
 }
 
-void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
-                                           struct drm_plane_state *old_state)
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+                                      struct drm_plane_state *old_state)
 {
        struct ast_private *ast = plane->dev->dev_private;
        struct drm_plane_state *state = plane->state;
@@ -801,6 +802,9 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
+       if (!state->enable)
+               return 0; /* no mode checks if CRTC is being disabled */
+
        ast_state = to_ast_crtc_state(state);
 
        format = ast_state->format;
@@ -881,6 +885,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
        .atomic_disable = ast_crtc_helper_atomic_disable,
 };
 
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+       struct ast_crtc_state *ast_state =
+               kzalloc(sizeof(*ast_state), GFP_KERNEL);
+
+       if (crtc->state)
+               crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+       __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+}
+
 static void ast_crtc_destroy(struct drm_crtc *crtc)
 {
        drm_crtc_cleanup(crtc);
@@ -919,8 +934,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs ast_crtc_funcs = {
-       .reset = drm_atomic_helper_crtc_reset,
-       .set_config = drm_crtc_helper_set_config,
+       .reset = ast_crtc_reset,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .destroy = ast_crtc_destroy,
        .set_config = drm_atomic_helper_set_config,
@@ -1069,7 +1083,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
 {
        struct ast_connector *ast_connector = to_ast_connector(connector);
        ast_i2c_destroy(ast_connector->i2c);
-       drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -1112,8 +1125,6 @@ static int ast_connector_init(struct drm_device *dev)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       drm_connector_register(connector);
-
        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
index e2019fe97fff5b6bfd0864f6bd22dbee03f60721..43bc709e35237d8d9158f729f0d09ed29592e799 100644 (file)
 #include <linux/media-bus-format.h>
 #include <linux/of_graph.h>
 
+#include <drm/drm_bridge.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "atmel_hlcdc_dc.h"
 
@@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output {
        int bus_fmt;
 };
 
-static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static struct atmel_hlcdc_rgb_output *
 atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
 {
@@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
                return -EINVAL;
        }
 
-       ret = drm_encoder_init(dev, &output->encoder,
-                              &atmel_hlcdc_panel_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(dev, &output->encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret)
                return ret;
 
index 917767173ee6836deea7b5e2bfa704c0aa77ea2d..e5bd1d517a18d09398b6408e98fdbaf467c370c5 100644 (file)
@@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs);
 
 /* bochs_kms.c */
 int bochs_kms_init(struct bochs_device *bochs);
-void bochs_kms_fini(struct bochs_device *bochs);
 
 /* bochs_fbdev.c */
 extern const struct drm_mode_config_funcs bochs_mode_funcs;
index addb0568c1affef3e5cffb9d53783006d13e0864..e18c51de11969b370da55a63a2b3ff11fa982f1a 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <drm/drm_drv.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
 
 #include "bochs.h"
 
@@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev)
 {
        struct bochs_device *bochs = dev->dev_private;
 
-       bochs_kms_fini(bochs);
        bochs_mm_fini(bochs);
-       kfree(bochs);
-       dev->dev_private = NULL;
 }
 
 static int bochs_load(struct drm_device *dev)
@@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev)
        struct bochs_device *bochs;
        int ret;
 
-       bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+       bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
        if (bochs == NULL)
                return -ENOMEM;
        dev->dev_private = bochs;
index 8066d7d370d5b5cf50932e6cfcecfa6762c19347..05d8373888e81d48250c618f2e821555436062c1 100644 (file)
@@ -104,7 +104,6 @@ static void bochs_connector_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_VIRTUAL);
        drm_connector_helper_add(connector,
                                 &bochs_connector_connector_helper_funcs);
-       drm_connector_register(connector);
 
        bochs_hw_load_edid(bochs);
        if (bochs->edid) {
@@ -134,7 +133,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
 
 int bochs_kms_init(struct bochs_device *bochs)
 {
-       drm_mode_config_init(bochs->dev);
+       int ret;
+
+       ret = drmm_mode_config_init(bochs->dev);
+       if (ret)
+               return ret;
 
        bochs->dev->mode_config.max_width = 8192;
        bochs->dev->mode_config.max_height = 8192;
@@ -160,12 +163,3 @@ int bochs_kms_init(struct bochs_device *bochs)
 
        return 0;
 }
-
-void bochs_kms_fini(struct bochs_device *bochs)
-{
-       if (!bochs->dev->mode_config.num_connector)
-               return;
-
-       drm_atomic_helper_shutdown(bochs->dev);
-       drm_mode_config_cleanup(bochs->dev);
-}
index aaed2347ace9d8112ae508f6e9ee93b1027e8468..04f876e985deafd9f369e65d2645e855cb90b77c 100644 (file)
@@ -27,6 +27,16 @@ config DRM_CDNS_DSI
          Support Cadence DPI to DSI bridge. This is an internal
          bridge and is meant to be directly embedded in a SoC.
 
+config DRM_CHRONTEL_CH7033
+       tristate "Chrontel CH7033 Video Encoder"
+       depends on OF
+       select DRM_KMS_HELPER
+       help
+         Enable support for the Chrontel CH7033 VGA/DVI/HDMI Encoder, as
+         found in the Dell Wyse 3020 thin client.
+
+         If in doubt, say "N".
+
 config DRM_DISPLAY_CONNECTOR
        tristate "Display connector support"
        depends on OF
@@ -58,6 +68,22 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
          to DP++. This is used with the i.MX6 imx-ldb
          driver. You are likely to say N here.
 
+config DRM_NWL_MIPI_DSI
+       tristate "Northwest Logic MIPI DSI Host controller"
+       depends on DRM
+       depends on COMMON_CLK
+       depends on OF && HAS_IOMEM
+       select DRM_KMS_HELPER
+       select DRM_MIPI_DSI
+       select DRM_PANEL_BRIDGE
+       select GENERIC_PHY_MIPI_DPHY
+       select MFD_SYSCON
+       select MULTIPLEXER
+       select REGMAP_MMIO
+       help
+         This enables the Northwest Logic MIPI DSI Host controller as
+         for example found on NXP's i.MX8 Processors.
+
 config DRM_NXP_PTN3460
        tristate "NXP PTN3460 DP/LVDS bridge"
        depends on OF
index 6fb062b5b0f04e493dcb6cf6a89cefc10ff3ddad..d63d4b7e434733f6fb8cbea58edbf6b8c168bf98 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
 obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
 obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
 obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
 
 obj-y += analogix/
 obj-y += synopsys/
index 47d4eb9e845d085cedd1bf8c9ddb9a1bca92fd8d..f46a5e26b5dd640670afa21802f9019d5c7439fb 100644 (file)
@@ -6,7 +6,7 @@ config DRM_I2C_ADV7511
        select REGMAP_I2C
        select DRM_MIPI_DSI
        help
-         Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
+         Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
 
 config DRM_I2C_ADV7511_AUDIO
        bool "ADV7511 HDMI Audio driver"
index a428185be2c1576c6bf68e9d55a80ee470795c41..f101dd2819b5290dbb2992c3d04aea0432c14182 100644 (file)
@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
 {
        switch (fs) {
        case 32000:
-               *n = 4096;
+       case 48000:
+       case 96000:
+       case 192000:
+               *n = fs * 128 / 1000;
                break;
        case 44100:
-               *n = 6272;
-               break;
-       case 48000:
-               *n = 6144;
+       case 88200:
+       case 176400:
+               *n = fs * 128 / 900;
                break;
        }
 
@@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
                audio_source = ADV7511_AUDIO_SOURCE_I2S;
                i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
                break;
+       case HDMI_SPDIF:
+               audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+               break;
        default:
                return -EINVAL;
        }
@@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data)
        /* use Audio infoframe updated info */
        regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
                                BIT(5), 0);
+       /* enable SPDIF receiver */
+       if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                  BIT(7), BIT(7));
+
        return 0;
 }
 
 static void audio_shutdown(struct device *dev, void *data)
 {
+       struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+       if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                  BIT(7), 0);
 }
 
 static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = {
        .ops = &adv7511_codec_ops,
        .max_i2s_channels = 2,
        .i2s = 1,
+       .spdif = 1,
 };
 
 int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
new file mode 100644 (file)
index 0000000..f8675d8
--- /dev/null
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Chrontel CH7033 Video Encoder Driver
+ *
+ * Copyright (C) 2019,2020 Lubomir Rintel
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* Page 0, Register 0x07 */
+enum {
+       DRI_PD          = BIT(3),
+       IO_PD           = BIT(5),
+};
+
+/* Page 0, Register 0x08 */
+enum {
+       DRI_PDDRI       = GENMASK(7, 4),
+       PDDAC           = GENMASK(3, 1),
+       PANEN           = BIT(0),
+};
+
+/* Page 0, Register 0x09 */
+enum {
+       DPD             = BIT(7),
+       GCKOFF          = BIT(6),
+       TV_BP           = BIT(5),
+       SCLPD           = BIT(4),
+       SDPD            = BIT(3),
+       VGA_PD          = BIT(2),
+       HDBKPD          = BIT(1),
+       HDMI_PD         = BIT(0),
+};
+
+/* Page 0, Register 0x0a */
+enum {
+       MEMINIT         = BIT(7),
+       MEMIDLE         = BIT(6),
+       MEMPD           = BIT(5),
+       STOP            = BIT(4),
+       LVDS_PD         = BIT(3),
+       HD_DVIB         = BIT(2),
+       HDCP_PD         = BIT(1),
+       MCU_PD          = BIT(0),
+};
+
+/* Page 0, Register 0x18 */
+enum {
+       IDF             = GENMASK(7, 4),
+       INTEN           = BIT(3),
+       SWAP            = GENMASK(2, 0),
+};
+
+enum {
+       BYTE_SWAP_RGB   = 0,
+       BYTE_SWAP_RBG   = 1,
+       BYTE_SWAP_GRB   = 2,
+       BYTE_SWAP_GBR   = 3,
+       BYTE_SWAP_BRG   = 4,
+       BYTE_SWAP_BGR   = 5,
+};
+
+/* Page 0, Register 0x19 */
+enum {
+       HPO_I           = BIT(5),
+       VPO_I           = BIT(4),
+       DEPO_I          = BIT(3),
+       CRYS_EN         = BIT(2),
+       GCLKFREQ        = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2e */
+enum {
+       HFLIP           = BIT(7),
+       VFLIP           = BIT(6),
+       DEPO_O          = BIT(5),
+       HPO_O           = BIT(4),
+       VPO_O           = BIT(3),
+       TE              = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2b */
+enum {
+       SWAPS           = GENMASK(7, 4),
+       VFMT            = GENMASK(3, 0),
+};
+
+/* Page 0, Register 0x54 */
+enum {
+       COMP_BP         = BIT(7),
+       DAC_EN_T        = BIT(6),
+       HWO_HDMI_HI     = GENMASK(5, 3),
+       HOO_HDMI_HI     = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x57 */
+enum {
+       FLDSEN          = BIT(7),
+       VWO_HDMI_HI     = GENMASK(5, 3),
+       VOO_HDMI_HI     = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x7e */
+enum {
+       HDMI_LVDS_SEL   = BIT(7),
+       DE_GEN          = BIT(6),
+       PWM_INDEX_HI    = BIT(5),
+       USE_DE          = BIT(4),
+       R_INT           = GENMASK(3, 0),
+};
+
+/* Page 1, Register 0x07 */
+enum {
+       BPCKSEL         = BIT(7),
+       DRI_CMFB_EN     = BIT(6),
+       CEC_PUEN        = BIT(5),
+       CEC_T           = BIT(3),
+       CKINV           = BIT(2),
+       CK_TVINV        = BIT(1),
+       DRI_CKS2        = BIT(0),
+};
+
+/* Page 1, Register 0x08 */
+enum {
+       DACG            = BIT(6),
+       DACKTST         = BIT(5),
+       DEDGEB          = BIT(4),
+       SYO             = BIT(3),
+       DRI_IT_LVDS     = GENMASK(2, 1),
+       DISPON          = BIT(0),
+};
+
+/* Page 1, Register 0x0c */
+enum {
+       DRI_PLL_CP      = GENMASK(7, 6),
+       DRI_PLL_DIVSEL  = BIT(5),
+       DRI_PLL_N1_1    = BIT(4),
+       DRI_PLL_N1_0    = BIT(3),
+       DRI_PLL_N3_1    = BIT(2),
+       DRI_PLL_N3_0    = BIT(1),
+       DRI_PLL_CKTSTEN = BIT(0),
+};
+
+/* Page 1, Register 0x6b */
+enum {
+       VCO3CS          = GENMASK(7, 6),
+       ICPGBK2_0       = GENMASK(5, 3),
+       DRI_VCO357SC    = BIT(2),
+       PDPLL2          = BIT(1),
+       DRI_PD_SER      = BIT(0),
+};
+
+/* Page 1, Register 0x6c */
+enum {
+       PLL2N11         = GENMASK(7, 4),
+       PLL2N5_4        = BIT(3),
+       PLL2N5_TOP      = BIT(2),
+       DRI_PLL_PD      = BIT(1),
+       PD_I2CM         = BIT(0),
+};
+
+/* Page 3, Register 0x28 */
+enum {
+       DIFF_EN         = GENMASK(7, 6),
+       CORREC_EN       = GENMASK(5, 4),
+       VGACLK_BP       = BIT(3),
+       HM_LV_SEL       = BIT(2),
+       HD_VGA_SEL      = BIT(1),
+};
+
+/* Page 3, Register 0x2a */
+enum {
+       LVDSCLK_BP      = BIT(7),
+       HDTVCLK_BP      = BIT(6),
+       HDMICLK_BP      = BIT(5),
+       HDTV_BP         = BIT(4),
+       HDMI_BP         = BIT(3),
+       THRWL           = GENMASK(2, 0),
+};
+
+/* Page 4, Register 0x52 */
+enum {
+       PGM_ARSTB       = BIT(7),
+       MCU_ARSTB       = BIT(6),
+       MCU_RETB        = BIT(2),
+       RESETIB         = BIT(1),
+       RESETDB         = BIT(0),
+};
+
+/* Per-device driver state. */
+struct ch7033_priv {
+       struct regmap *regmap;          /* I2C regmap; pages selected via reg 0x03 */
+       struct drm_bridge *next_bridge; /* downstream bridge (connector side) */
+       struct drm_bridge bridge;       /* this encoder's bridge */
+       struct drm_connector connector; /* created only without NO_CONNECTOR */
+};
+
+/* container_of() helpers to get from DRM objects back to driver state. */
+#define conn_to_ch7033_priv(x) \
+       container_of(x, struct ch7033_priv, connector)
+#define bridge_to_ch7033_priv(x) \
+       container_of(x, struct ch7033_priv, bridge)
+
+
+/* Delegate connector detection to the downstream bridge. */
+static enum drm_connector_status ch7033_connector_detect(
+       struct drm_connector *connector, bool force)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+       return drm_bridge_detect(priv->next_bridge);
+}
+
+static const struct drm_connector_funcs ch7033_connector_funcs = {
+       .reset = drm_atomic_helper_connector_reset,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .detect = ch7033_connector_detect,
+       .destroy = drm_connector_cleanup,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/*
+ * Fetch modes from the downstream bridge's EDID; fall back to standard
+ * modes (up to 1920x1080, preferring 1024x768) when no EDID is available.
+ */
+static int ch7033_connector_get_modes(struct drm_connector *connector)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+       struct edid *edid;
+       int ret;
+
+       edid = drm_bridge_get_edid(priv->next_bridge, connector);
+       /* Update the property even with a NULL edid (clears stale data). */
+       drm_connector_update_edid_property(connector, edid);
+       if (edid) {
+               ret = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       } else {
+               ret = drm_add_modes_noedid(connector, 1920, 1080);
+               drm_set_preferred_mode(connector, 1024, 768);
+       }
+
+       return ret;
+}
+
+static struct drm_encoder *ch7033_connector_best_encoder(
+                       struct drm_connector *connector)
+{
+       struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+       return priv->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = {
+       .get_modes = ch7033_connector_get_modes,
+       .best_encoder = ch7033_connector_best_encoder,
+};
+
+/*
+ * Hot-plug callback registered with the downstream bridge; forwards the
+ * event to the DRM core once the bridge is attached to a device.
+ * NOTE(review): guards on priv->bridge.dev but notifies via
+ * priv->connector.dev — both should be set by attach time; confirm.
+ */
+static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
+{
+       struct ch7033_priv *priv = arg;
+
+       if (priv->bridge.dev)
+               drm_helper_hpd_irq_event(priv->connector.dev);
+}
+
+/*
+ * Attach: chain the downstream bridge first (always connector-less), then
+ * create our own connector unless the caller asked us not to.
+ */
+static int ch7033_bridge_attach(struct drm_bridge *bridge,
+                               enum drm_bridge_attach_flags flags)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+       struct drm_connector *connector = &priv->connector;
+       int ret;
+
+       /* The next bridge never creates a connector; we do that below. */
+       ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret)
+               return ret;
+
+       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+               return 0;
+
+       /* Use HPD if the downstream bridge can detect, otherwise poll. */
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) {
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+       } else {
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                   DRM_CONNECTOR_POLL_DISCONNECT;
+       }
+
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+               drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event,
+                                     priv);
+       }
+
+       drm_connector_helper_add(connector,
+                                &ch7033_connector_helper_funcs);
+       /* Reuse the downstream bridge's connector type and DDC channel. */
+       ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector,
+                                         &ch7033_connector_funcs,
+                                         priv->next_bridge->type,
+                                         priv->next_bridge->ddc);
+       if (ret) {
+               DRM_ERROR("Failed to initialize connector\n");
+               return ret;
+       }
+
+       return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+}
+
+/* Undo attach: disable downstream HPD and tear down our connector. */
+static void ch7033_bridge_detach(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD)
+               drm_bridge_hpd_disable(priv->next_bridge);
+       drm_connector_cleanup(&priv->connector);
+}
+
+/*
+ * Reject modes the encoder cannot handle: pixel clocks above 165 MHz and
+ * resolutions of 1920x1080 or larger (limits presumably from the CH7033
+ * datasheet — TODO confirm).
+ */
+static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge,
+                                    const struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+       if (mode->hdisplay >= 1920)
+               return MODE_BAD_HVALUE;
+       if (mode->vdisplay >= 1080)
+               return MODE_BAD_VVALUE;
+       return MODE_OK;
+}
+
+/* Put the display block into reset (page 4, reg 0x52, RESETDB low). */
+static void ch7033_bridge_disable(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       regmap_write(priv->regmap, 0x03, 0x04); /* select page 4 */
+       regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00);
+}
+
+/* Release the display block from reset (page 4, reg 0x52, RESETDB high). */
+static void ch7033_bridge_enable(struct drm_bridge *bridge)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+       regmap_write(priv->regmap, 0x03, 0x04); /* select page 4 */
+       regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB);
+}
+
+/*
+ * Program the encoder's input and output timing from the mode.
+ *
+ * Registers live in pages selected through register 0x03; this routine
+ * walks pages 4, 0, 1 and 3 in turn.
+ */
+static void ch7033_bridge_mode_set(struct drm_bridge *bridge,
+                                  const struct drm_display_mode *mode,
+                                  const struct drm_display_mode *adjusted_mode)
+{
+       struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+       /*
+        * NOTE(review): hsync_start - hdisplay is the horizontal *front*
+        * porch; the names here appear to follow the datasheet's register
+        * naming — verify against the CH7033 documentation.
+        */
+       int hbporch = mode->hsync_start - mode->hdisplay;
+       int hsynclen = mode->hsync_end - mode->hsync_start;
+       int vbporch = mode->vsync_start - mode->vdisplay;
+       int vsynclen = mode->vsync_end - mode->vsync_start;
+
+       /*
+        * Page 4
+        */
+       regmap_write(priv->regmap, 0x03, 0x04);
+
+       /* Turn everything off to set all the registers to their defaults. */
+       regmap_write(priv->regmap, 0x52, 0x00);
+       /* Bring I/O block up. */
+       regmap_write(priv->regmap, 0x52, RESETIB);
+
+       /*
+        * Page 0
+        */
+       regmap_write(priv->regmap, 0x03, 0x00);
+
+       /* Bring up parts we need from the power down. */
+       regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0);
+       regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0);
+       regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF |
+                                              HDMI_PD | VGA_PD, 0);
+       regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0);
+
+       /* Horizontal input timing. */
+       regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 |
+                                        (mode->hdisplay >> 8));
+       regmap_write(priv->regmap, 0x0c, mode->hdisplay);
+       regmap_write(priv->regmap, 0x0d, mode->htotal);
+       regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 |
+                                        (hbporch >> 8));
+       regmap_write(priv->regmap, 0x0f, hbporch);
+       regmap_write(priv->regmap, 0x10, hsynclen);
+
+       /* Vertical input timing. */
+       regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 |
+                                        (mode->vdisplay >> 8));
+       regmap_write(priv->regmap, 0x12, mode->vdisplay);
+       regmap_write(priv->regmap, 0x13, mode->vtotal);
+       regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) |
+                                        (vbporch >> 8));
+       regmap_write(priv->regmap, 0x15, vbporch);
+       regmap_write(priv->regmap, 0x16, vsynclen);
+
+       /* Input color swap. */
+       regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR);
+
+       /* Input clock and sync polarity. */
+       regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16);
+       /*
+        * The ternaries must be parenthesized: "?:" binds looser than "|",
+        * so without parentheses the whole expression collapsed to HPO_I
+        * alone whenever PHSYNC was set, dropping VPO_I and the clock's
+        * high bits.
+        */
+       regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ,
+                          ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0) |
+                          ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0) |
+                          (mode->clock >> 16));
+       regmap_write(priv->regmap, 0x1a, mode->clock >> 8);
+       regmap_write(priv->regmap, 0x1b, mode->clock);
+
+       /* Horizontal output timing. */
+       regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 |
+                                        (mode->hdisplay >> 8));
+       regmap_write(priv->regmap, 0x20, mode->hdisplay);
+       regmap_write(priv->regmap, 0x21, mode->htotal);
+
+       /* Vertical output timing. */
+       regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 |
+                                        (mode->vdisplay >> 8));
+       regmap_write(priv->regmap, 0x26, mode->vdisplay);
+       regmap_write(priv->regmap, 0x27, mode->vtotal);
+
+       /* VGA channel bypass */
+       regmap_update_bits(priv->regmap, 0x2b, VFMT, 9);
+
+       /* Output sync polarity (same precedence fix as register 0x19). */
+       regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O,
+                          ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0) |
+                          ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0));
+
+       /* HDMI horizontal output timing. */
+       regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI,
+                                              (hsynclen >> 8) << 3 |
+                                              (hbporch >> 8));
+       regmap_write(priv->regmap, 0x55, hbporch);
+       regmap_write(priv->regmap, 0x56, hsynclen);
+
+       /* HDMI vertical output timing. */
+       regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI,
+                                              (vsynclen >> 8) << 3 |
+                                              (vbporch >> 8));
+       regmap_write(priv->regmap, 0x58, vbporch);
+       regmap_write(priv->regmap, 0x59, vsynclen);
+
+       /* Pick HDMI, not LVDS. */
+       regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL);
+
+       /*
+        * Page 1
+        */
+       regmap_write(priv->regmap, 0x03, 0x01);
+
+       /* No idea what these do, but VGA is wobbly and blinky without them. */
+       regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV);
+       regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON);
+
+       /* DRI PLL: pick N1/N3 dividers based on the pixel clock (kHz). */
+       regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL);
+       if (mode->clock <= 40000) {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      0);
+       } else if (mode->clock < 80000) {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      DRI_PLL_N3_0 |
+                                                      DRI_PLL_N1_0);
+       } else {
+               regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+                                                      DRI_PLL_N1_0 |
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N3_0,
+                                                      DRI_PLL_N3_1 |
+                                                      DRI_PLL_N1_1);
+       }
+
+       /* This seems to be color calibration for VGA. */
+       regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */
+       regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */
+       regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */
+       regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */
+       regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */
+       regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */
+
+       regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00);
+       regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00);
+
+       /*
+        * Page 3
+        */
+       regmap_write(priv->regmap, 0x03, 0x03);
+
+       /* More bypasses and apparently another HDMI/LVDS selector. */
+       regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL,
+                                              VGACLK_BP | HM_LV_SEL);
+       regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP,
+                                              HDMICLK_BP | HDMI_BP);
+
+       /*
+        * Page 4
+        */
+       regmap_write(priv->regmap, 0x03, 0x04);
+
+       /* Output clock. */
+       regmap_write(priv->regmap, 0x10, mode->clock >> 16);
+       regmap_write(priv->regmap, 0x11, mode->clock >> 8);
+       regmap_write(priv->regmap, 0x12, mode->clock);
+}
+
+static const struct drm_bridge_funcs ch7033_bridge_funcs = {
+       .attach = ch7033_bridge_attach,
+       .detach = ch7033_bridge_detach,
+       .mode_valid = ch7033_bridge_mode_valid,
+       .disable = ch7033_bridge_disable,
+       .enable = ch7033_bridge_enable,
+       .mode_set = ch7033_bridge_mode_set,
+};
+
+static const struct regmap_config ch7033_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0x7f,
+};
+
+/*
+ * I2C probe: find the downstream bridge, set up the regmap, verify the
+ * device identity and revision, then register the DRM bridge.
+ */
+static int ch7033_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct ch7033_priv *priv;
+       unsigned int val;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, priv);
+
+       /* Port 1 is the output side; may return -EPROBE_DEFER. */
+       ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+                                         &priv->next_bridge);
+       if (ret)
+               return ret;
+
+       priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(&client->dev, "regmap init failed\n");
+               return PTR_ERR(priv->regmap);
+       }
+
+       /* Model ID at register 0x00: 0x56 identifies a CH7033 (bit 3 masked). */
+       ret = regmap_read(priv->regmap, 0x00, &val);
+       if (ret < 0) {
+               dev_err(&client->dev, "error reading the model id: %d\n", ret);
+               return ret;
+       }
+       if ((val & 0xf7) != 0x56) {
+               dev_err(&client->dev, "the device is not a ch7033\n");
+               return -ENODEV;
+       }
+
+       /* Revision lives in page 4, register 0x51; only revision 3 is known. */
+       regmap_write(priv->regmap, 0x03, 0x04);
+       ret = regmap_read(priv->regmap, 0x51, &val);
+       if (ret < 0) {
+               /* Was a copy-paste of the model-id message above. */
+               dev_err(&client->dev, "error reading the revision: %d\n", ret);
+               return ret;
+       }
+       if ((val & 0x0f) != 3) {
+               dev_err(&client->dev, "unknown revision %u\n", val);
+               return -ENODEV;
+       }
+
+       INIT_LIST_HEAD(&priv->bridge.list);
+       priv->bridge.funcs = &ch7033_bridge_funcs;
+       priv->bridge.of_node = dev->of_node;
+       drm_bridge_add(&priv->bridge);
+
+       dev_info(dev, "Chrontel CH7033 Video Encoder\n");
+       return 0;
+}
+
+/* I2C remove: unregister the bridge; memory is devm-managed. */
+static int ch7033_remove(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct ch7033_priv *priv = dev_get_drvdata(dev);
+
+       drm_bridge_remove(&priv->bridge);
+
+       return 0;
+}
+
+static const struct of_device_id ch7033_dt_ids[] = {
+       { .compatible = "chrontel,ch7033", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
+
+static const struct i2c_device_id ch7033_ids[] = {
+       { "ch7033", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7033_ids);
+
+static struct i2c_driver ch7033_driver = {
+       .probe = ch7033_probe,
+       .remove = ch7033_remove,
+       .driver = {
+               .name = "ch7033",
+               .of_match_table = of_match_ptr(ch7033_dt_ids),
+       },
+       .id_table = ch7033_ids,
+};
+
+module_i2c_driver(ch7033_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
new file mode 100644 (file)
index 0000000..b14d725
--- /dev/null
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2020 Purism SPC
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/time64.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+#include "nwl-dsi.h"
+
+#define DRV_NAME "nwl-dsi"
+
+/* i.MX8 NWL quirks */
+/* i.MX8MQ errata E11418 */
+#define E11418_HS_MODE_QUIRK   BIT(0)
+
+#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
+
+enum transfer_direction {
+       DSI_PACKET_SEND,
+       DSI_PACKET_RECEIVE,
+};
+
+#define NWL_DSI_ENDPOINT_LCDIF 0
+#define NWL_DSI_ENDPOINT_DCSS 1
+
+struct nwl_dsi_plat_clk_config {
+       const char *id;
+       struct clk *clk;
+       bool present;
+};
+
+struct nwl_dsi_transfer {
+       const struct mipi_dsi_msg *msg;
+       struct mipi_dsi_packet packet;
+       struct completion completed;
+
+       int status; /* status of transmission */
+       enum transfer_direction direction;
+       bool need_bta;
+       u8 cmd;
+       u16 rx_word_count;
+       size_t tx_len; /* in bytes */
+       size_t rx_len; /* in bytes */
+};
+
+struct nwl_dsi {
+       struct drm_bridge bridge;
+       struct mipi_dsi_host dsi_host;
+       struct drm_bridge *panel_bridge;
+       struct device *dev;
+       struct phy *phy;
+       union phy_configure_opts phy_cfg;
+       unsigned int quirks;
+
+       struct regmap *regmap;
+       int irq;
+       /*
+        * The DSI host controller needs this reset sequence according to NWL:
+        * 1. Deassert pclk reset to get access to DSI regs
+        * 2. Configure DSI Host and DPHY and enable DPHY
+        * 3. Deassert ESC and BYTE resets to allow host TX operations)
+        * 4. Send DSI cmds to configure peripheral (handled by panel drv)
+        * 5. Deassert DPI reset so DPI receives pixels and starts sending
+        *    DSI data
+        *
+        * TODO: Since panel_bridges do their DSI setup in enable we
+        * currently have 4. and 5. swapped.
+        */
+       struct reset_control *rst_byte;
+       struct reset_control *rst_esc;
+       struct reset_control *rst_dpi;
+       struct reset_control *rst_pclk;
+       struct mux_control *mux;
+
+       /* DSI clocks */
+       struct clk *phy_ref_clk;
+       struct clk *rx_esc_clk;
+       struct clk *tx_esc_clk;
+       struct clk *core_clk;
+       /*
+        * hardware bug: the i.MX8MQ needs this clock on during reset
+        * even when not using LCDIF.
+        */
+       struct clk *lcdif_clk;
+
+       /* dsi lanes */
+       u32 lanes;
+       enum mipi_dsi_pixel_format format;
+       struct drm_display_mode mode;
+       unsigned long dsi_mode_flags;
+       int error;
+
+       struct nwl_dsi_transfer *xfer;
+};
+
+static const struct regmap_config nwl_dsi_regmap_config = {
+       .reg_bits = 16,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = NWL_DSI_IRQ_MASK2,
+       .name = DRV_NAME,
+};
+
+static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct nwl_dsi, bridge);
+}
+
+/* Return and reset the sticky error latched by nwl_dsi_read()/write(). */
+static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
+{
+       int ret = dsi->error;
+
+       dsi->error = 0;
+       return ret;
+}
+
+/*
+ * Register write that latches the first failure in dsi->error; once an
+ * error is pending, further writes are no-ops until the caller checks
+ * with nwl_dsi_clear_error().
+ */
+static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
+{
+       int ret;
+
+       if (dsi->error)
+               return;
+
+       ret = regmap_write(dsi->regmap, reg, val);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev,
+                             "Failed to write NWL DSI reg 0x%x: %d\n", reg,
+                             ret);
+               dsi->error = ret;
+       }
+}
+
+/*
+ * Register read with the same sticky-error scheme as nwl_dsi_write():
+ * a pending error short-circuits to 0, a new failure is latched in
+ * dsi->error.
+ */
+static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
+{
+       /*
+        * Initialize val: regmap_read() may leave it untouched on failure,
+        * and the original code then returned an uninitialized value.
+        */
+       unsigned int val = 0;
+       int ret;
+
+       if (dsi->error)
+               return 0;
+
+       ret = regmap_read(dsi->regmap, reg, &val);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
+                             reg, ret);
+               dsi->error = ret;
+       }
+       return val;
+}
+
+/*
+ * Map a MIPI DSI pixel format to the controller's PIXEL_FORMAT register
+ * value; returns -EINVAL for unsupported formats.
+ */
+static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
+{
+       switch (format) {
+       case MIPI_DSI_FMT_RGB565:
+               return NWL_DSI_PIXEL_FORMAT_16;
+       case MIPI_DSI_FMT_RGB666:
+               return NWL_DSI_PIXEL_FORMAT_18L;
+       case MIPI_DSI_FMT_RGB666_PACKED:
+               return NWL_DSI_PIXEL_FORMAT_18;
+       case MIPI_DSI_FMT_RGB888:
+               return NWL_DSI_PIXEL_FORMAT_24;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * ps2bc - Picoseconds to byte clock cycles
+ *
+ * The byte clock runs at pixel_clock * bpp / (lanes * 8); mode.clock is
+ * in kHz, which is why the divisor uses NSEC_PER_SEC for a picosecond
+ * input (kHz * ps = 1/NSEC units).
+ */
+static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
+{
+       u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+       return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
+                                 dsi->lanes * 8 * NSEC_PER_SEC);
+}
+
+/*
+ * ui2bc - UI time periods to byte clock cycles
+ *
+ * One UI is one bit time on a data lane, so N UI always equal N/8 byte
+ * clock cycles regardless of the link rate.  The previous formula,
+ * ui * lanes / (clock * 1000 * bpp), actually computed the duration in
+ * *seconds* (a UI count times the UI period), which rounded up to 1 for
+ * any non-zero input and badly underestimated cfg_t_pre/cfg_t_post.
+ * The @dsi argument is kept for interface compatibility.
+ */
+static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+{
+       return DIV_ROUND_UP(ui, 8);
+}
+
+/*
+ * us2lp - microseconds to LP (low-power escape) clock cycles
+ * (The original comment said "us2bc", which named the wrong function.)
+ */
+static u32 us2lp(u32 lp_clk_rate, unsigned long us)
+{
+       return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
+}
+
+/*
+ * Program the DSI host's lane count, clock mode and D-PHY-derived timing
+ * values (in byte-clock and LP-clock cycles).  Returns any error latched
+ * by the register writes.
+ */
+static int nwl_dsi_config_host(struct nwl_dsi *dsi)
+{
+       u32 cycles;
+       struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
+
+       if (dsi->lanes < 1 || dsi->lanes > 4)
+               return -EINVAL;
+
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
+
+       /* Non-continuous clock requires automatic EoTp insertion. */
+       if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+               nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
+               nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
+       } else {
+               nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
+               nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
+       }
+
+       /* values in byte clock cycles */
+       cycles = ui2bc(dsi, cfg->clk_pre);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
+       /* T_POST = lpx + clk_prepare + clk_zero (ps) plus clk_pre (UI). */
+       cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
+       cycles += ui2bc(dsi, cfg->clk_pre);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
+       cycles = ps2bc(dsi, cfg->hs_exit);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
+
+       /* Disable the HTX/LRX/BTA timeout counters. */
+       nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
+       /* In LP clock cycles */
+       cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
+       nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * Program the DPI (pixel/video input) side of the DSI host from the
+ * cached dsi->mode: horizontal/vertical timings, color coding, sync
+ * polarities and the video transmission mode (burst vs. non-burst).
+ *
+ * Returns 0 on success or a negative error collected from the
+ * accumulated register-write error state.
+ */
+static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
+{
+       u32 mode;
+       int color_format;
+       bool burst_mode;
+       int hfront_porch, hback_porch, vfront_porch, vback_porch;
+       int hsync_len, vsync_len;
+
+       /* Derive porch and sync lengths from the standard DRM mode fields. */
+       hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
+       hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
+       hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
+
+       vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
+       vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
+       vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
+
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
+
+       color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
+       if (color_format < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
+                             dsi->format);
+               return color_format;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
+
+       nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
+       nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
+       /*
+        * Adjusting input polarity based on the video mode results in
+        * a black screen so always pick active low:
+        */
+       nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+                     NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
+       nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+                     NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
+
+       /* Burst mode only when requested and sync pulses are not required. */
+       burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+                    !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
+
+       if (burst_mode) {
+               nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
+               /* FIFO level 256 for burst - TODO confirm against HW FIFO depth */
+               nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
+       } else {
+               mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
+                               NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
+                               NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
+               nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
+               nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
+                             dsi->mode.hdisplay);
+       }
+
+       nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
+       nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
+       nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
+
+       nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
+       nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
+       nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
+       nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
+
+       nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
+       /* VACTIVE register is programmed as (active lines - 1). */
+       nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
+       nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
+       nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * Mask everything, then write the inverted set of events the driver
+ * handles (TX packet done, RX header received, TX FIFO overflow, HS TX
+ * timeout) so only those interrupts remain unmasked - see
+ * nwl_dsi_irq_handler() for the consumers.
+ */
+static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
+{
+       u32 irq_enable;
+
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
+
+       irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
+                           NWL_DSI_RX_PKT_HDR_RCVD_MASK |
+                           NWL_DSI_TX_FIFO_OVFLW_MASK |
+                           NWL_DSI_HS_TX_TIMEOUT_MASK);
+
+       nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
+
+       return nwl_dsi_clear_error(dsi);
+}
+
+/*
+ * mipi_dsi_host .attach callback: validate the peripheral's lane count
+ * (1..4) and cache its lane/format/mode-flag configuration for later
+ * host and DPI setup.
+ */
+static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
+                              struct mipi_dsi_device *device)
+{
+       struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+       struct device *dev = dsi->dev;
+
+       DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
+                    device->format, device->mode_flags);
+
+       if (device->lanes < 1 || device->lanes > 4)
+               return -EINVAL;
+
+       dsi->lanes = device->lanes;
+       dsi->format = device->format;
+       dsi->dsi_mode_flags = device->mode_flags;
+
+       return 0;
+}
+
+/*
+ * Consume RX state for the in-flight transfer in dsi->xfer (called from
+ * interrupt context via nwl_dsi_finish_transmission()).
+ *
+ * The first invocation parses the RX packet header: short-read responses
+ * carry their data directly in the header word-count field and complete
+ * the transfer immediately, as do error reports.  For long packets the
+ * word count is cached and subsequent invocations drain the payload FIFO
+ * once NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD is set.
+ *
+ * Returns true when the transfer is complete (xfer->status then holds
+ * the received byte count or a negative error), false when further
+ * interrupts are needed.
+ */
+static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
+{
+       struct device *dev = dsi->dev;
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       int err;
+       u8 *payload = xfer->msg->rx_buf;
+       u32 val;
+       u16 word_count;
+       u8 channel;
+       u8 data_type;
+
+       xfer->status = 0;
+
+       if (xfer->rx_word_count == 0) {
+               if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
+                       return false;
+               /* Get the RX header and parse it */
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
+               err = nwl_dsi_clear_error(dsi);
+               if (err)
+                       xfer->status = err;
+               word_count = NWL_DSI_WC(val);
+               channel = NWL_DSI_RX_VC(val);
+               data_type = NWL_DSI_RX_DT(val);
+
+               if (channel != xfer->msg->channel) {
+                       DRM_DEV_ERROR(dev,
+                                     "[%02X] Channel mismatch (%u != %u)\n",
+                                     xfer->cmd, channel, xfer->msg->channel);
+                       xfer->status = -EINVAL;
+                       return true;
+               }
+
+               switch (data_type) {
+               case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+                       fallthrough;
+               case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+                       if (xfer->msg->rx_len > 1) {
+                               /* read second byte */
+                               payload[1] = word_count >> 8;
+                               ++xfer->rx_len;
+                       }
+                       fallthrough;
+               case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+                       fallthrough;
+               case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+                       if (xfer->msg->rx_len > 0) {
+                               /* read first byte */
+                               payload[0] = word_count & 0xff;
+                               ++xfer->rx_len;
+                       }
+                       xfer->status = xfer->rx_len;
+                       return true;
+               case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+                       word_count &= 0xff;
+                       DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
+                                     xfer->cmd, word_count);
+                       xfer->status = -EPROTO;
+                       return true;
+               }
+
+               if (word_count > xfer->msg->rx_len) {
+                       DRM_DEV_ERROR(dev,
+                               "[%02X] Receive buffer too small: %zu (< %u)\n",
+                               xfer->cmd, xfer->msg->rx_len, word_count);
+                       xfer->status = -EINVAL;
+                       return true;
+               }
+
+               xfer->rx_word_count = word_count;
+       } else {
+               /* Set word_count from previous header read */
+               word_count = xfer->rx_word_count;
+       }
+
+       /* If RX payload is not yet received, wait for it */
+       if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
+               return false;
+
+       /* Read the RX payload, one little-endian 32-bit FIFO word at a time */
+       while (word_count >= 4) {
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+               payload[0] = (val >> 0) & 0xff;
+               payload[1] = (val >> 8) & 0xff;
+               payload[2] = (val >> 16) & 0xff;
+               payload[3] = (val >> 24) & 0xff;
+               payload += 4;
+               xfer->rx_len += 4;
+               word_count -= 4;
+       }
+
+       /* Unpack the final partial word (1-3 trailing bytes) */
+       if (word_count > 0) {
+               val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+               switch (word_count) {
+               case 3:
+                       payload[2] = (val >> 16) & 0xff;
+                       ++xfer->rx_len;
+                       fallthrough;
+               case 2:
+                       payload[1] = (val >> 8) & 0xff;
+                       ++xfer->rx_len;
+                       fallthrough;
+               case 1:
+                       payload[0] = (val >> 0) & 0xff;
+                       ++xfer->rx_len;
+                       break;
+               }
+       }
+
+       xfer->status = xfer->rx_len;
+       err = nwl_dsi_clear_error(dsi);
+       if (err)
+               xfer->status = err;
+
+       return true;
+}
+
+/*
+ * IRQ-context completion path: mark the current transfer done when TX
+ * finished, or feed RX events to nwl_dsi_read_packet(), and wake the
+ * waiter in nwl_dsi_host_transfer() once the packet is fully handled.
+ */
+static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
+{
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       bool end_packet = false;
+
+       /* No transfer in flight - nothing to complete. */
+       if (!xfer)
+               return;
+
+       if (xfer->direction == DSI_PACKET_SEND &&
+           status & NWL_DSI_TX_PKT_DONE) {
+               xfer->status = xfer->tx_len;
+               end_packet = true;
+       } else if (status & NWL_DSI_DPHY_DIRECTION &&
+                  ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
+                              NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
+               end_packet = nwl_dsi_read_packet(dsi, status);
+       }
+
+       if (end_packet)
+               complete(&xfer->completed);
+}
+
+/*
+ * Push the packet payload into the TX FIFO, build the packet-control
+ * word (word count, virtual channel, data type, HS/LP selection, BTA)
+ * and trigger the send.  Completion is signalled asynchronously via the
+ * TX_PKT_DONE interrupt.
+ */
+static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
+{
+       struct nwl_dsi_transfer *xfer = dsi->xfer;
+       struct mipi_dsi_packet *pkt = &xfer->packet;
+       const u8 *payload;
+       size_t length;
+       u16 word_count;
+       u8 hs_mode;
+       u32 val;
+       u32 hs_workaround = 0;
+
+       /* Send the payload, if any */
+       length = pkt->payload_length;
+       payload = pkt->payload;
+
+       while (length >= 4) {
+               val = *(u32 *)payload;
+               /* Flag words whose bytes 1-2 are zero for the E11418 quirk. */
+               hs_workaround |= !(val & 0xFFFF00);
+               nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+               payload += 4;
+               length -= 4;
+       }
+       /* Send the rest of the payload */
+       val = 0;
+       switch (length) {
+       case 3:
+               val |= payload[2] << 16;
+               fallthrough;
+       case 2:
+               val |= payload[1] << 8;
+               hs_workaround |= !(val & 0xFFFF00);
+               fallthrough;
+       case 1:
+               val |= payload[0];
+               nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+               break;
+       }
+       xfer->tx_len = pkt->payload_length;
+
+       /*
+        * Send the header
+        * header[0] = Virtual Channel + Data Type
+        * header[1] = Word Count LSB (LP) or first param (SP)
+        * header[2] = Word Count MSB (LP) or second param (SP)
+        */
+       word_count = pkt->header[1] | (pkt->header[2] << 8);
+       if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
+               /* i.MX8MQ rev 2.0 erratum: force HS mode for such payloads. */
+               DRM_DEV_DEBUG_DRIVER(dsi->dev,
+                                    "Using hs mode workaround for cmd 0x%x\n",
+                                    xfer->cmd);
+               hs_mode = 1;
+       } else {
+               hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
+       }
+       val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
+             NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
+             NWL_DSI_BTA_TX(xfer->need_bta);
+       nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
+
+       /* Send packet command */
+       nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
+}
+
+/*
+ * mipi_dsi_host .transfer callback: send one DSI packet (and receive the
+ * response for read-type messages), blocking until the IRQ handler
+ * completes the transfer or NWL_DSI_MIPI_FIFO_TIMEOUT expires.
+ *
+ * The transfer descriptor lives on this stack frame, so dsi->xfer must
+ * be cleared on every exit path; otherwise a late or spurious interrupt
+ * would make nwl_dsi_finish_transmission() dereference dead stack
+ * memory.
+ *
+ * Returns the number of bytes transferred or a negative error code.
+ */
+static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
+                                    const struct mipi_dsi_msg *msg)
+{
+       struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+       struct nwl_dsi_transfer xfer;
+       ssize_t ret = 0;
+
+       /* Create packet to be sent */
+       dsi->xfer = &xfer;
+       ret = mipi_dsi_create_packet(&xfer.packet, msg);
+       if (ret < 0) {
+               dsi->xfer = NULL;
+               return ret;
+       }
+
+       if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
+            msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
+            msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
+            msg->type & MIPI_DSI_DCS_READ) &&
+           msg->rx_len > 0 && msg->rx_buf)
+               xfer.direction = DSI_PACKET_RECEIVE;
+       else
+               xfer.direction = DSI_PACKET_SEND;
+
+       /* Reads always need a bus turn-around; ACK requests do too. */
+       xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
+       xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
+       xfer.msg = msg;
+       xfer.status = -ETIMEDOUT;
+       xfer.rx_word_count = 0;
+       xfer.rx_len = 0;
+       xfer.cmd = 0x00;
+       if (msg->tx_len > 0)
+               xfer.cmd = ((u8 *)(msg->tx_buf))[0];
+       init_completion(&xfer.completed);
+
+       ret = clk_prepare_enable(dsi->rx_esc_clk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
+                             ret);
+               /* Don't leave a dangling pointer to the stack transfer. */
+               dsi->xfer = NULL;
+               return ret;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
+                            clk_get_rate(dsi->rx_esc_clk));
+
+       /* Initiate the DSI packet transmision */
+       nwl_dsi_begin_transmission(dsi);
+
+       if (!wait_for_completion_timeout(&xfer.completed,
+                                        NWL_DSI_MIPI_FIFO_TIMEOUT)) {
+               DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
+                             xfer.cmd);
+               ret = -ETIMEDOUT;
+       } else {
+               ret = xfer.status;
+       }
+
+       clk_disable_unprepare(dsi->rx_esc_clk);
+
+       /* xfer goes out of scope now; detach it from the device state. */
+       dsi->xfer = NULL;
+
+       return ret;
+}
+
+/* MIPI DSI host callbacks registered in nwl_dsi_probe(). */
+static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
+       .attach = nwl_dsi_host_attach,
+       .transfer = nwl_dsi_host_transfer,
+};
+
+/*
+ * Top-half interrupt handler: report FIFO overflow / HS TX timeout and
+ * forward packet-completion and RX events to
+ * nwl_dsi_finish_transmission().  Reading NWL_DSI_IRQ_STATUS is what
+ * acknowledges the pending events - presumably read-to-clear; confirm
+ * against the NWL DSI host reference manual.
+ */
+static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
+{
+       u32 irq_status;
+       struct nwl_dsi *dsi = data;
+
+       irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
+
+       if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
+               DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
+
+       if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
+               DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
+
+       if (irq_status & NWL_DSI_TX_PKT_DONE ||
+           irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
+           irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
+               nwl_dsi_finish_transmission(dsi, irq_status);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Bring the DSI link up: init/configure the D-PHY, enable the TX escape
+ * clock, program the host and DPI blocks, power on the PHY and unmask
+ * interrupts.  Errors unwind in reverse order via the goto ladder.
+ * Requires a prior successful nwl_dsi_host_attach() (dsi->lanes set).
+ */
+static int nwl_dsi_enable(struct nwl_dsi *dsi)
+{
+       struct device *dev = dsi->dev;
+       union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
+       int ret;
+
+       if (!dsi->lanes) {
+               DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
+               return -EINVAL;
+       }
+
+       ret = phy_init(dsi->phy);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
+               return ret;
+       }
+
+       /* phy_cfg was computed in nwl_dsi_bridge_mode_set(). */
+       ret = phy_configure(dsi->phy, phy_cfg);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
+               goto uninit_phy;
+       }
+
+       ret = clk_prepare_enable(dsi->tx_esc_clk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
+                             ret);
+               goto uninit_phy;
+       }
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
+                            clk_get_rate(dsi->tx_esc_clk));
+
+       ret = nwl_dsi_config_host(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
+               goto disable_clock;
+       }
+
+       ret = nwl_dsi_config_dpi(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
+               goto disable_clock;
+       }
+
+       ret = phy_power_on(dsi->phy);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
+               goto disable_clock;
+       }
+
+       ret = nwl_dsi_init_interrupts(dsi);
+       if (ret < 0)
+               goto power_off_phy;
+
+       return ret;
+
+power_off_phy:
+       phy_power_off(dsi->phy);
+disable_clock:
+       clk_disable_unprepare(dsi->tx_esc_clk);
+uninit_phy:
+       phy_exit(dsi->phy);
+
+       return ret;
+}
+
+/*
+ * Reverse of nwl_dsi_enable(): power off and tear down the PHY, then
+ * stop the TX escape clock.  Always returns 0.
+ */
+static int nwl_dsi_disable(struct nwl_dsi *dsi)
+{
+       struct device *dev = dsi->dev;
+
+       DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
+
+       phy_power_off(dsi->phy);
+       phy_exit(dsi->phy);
+
+       /* Disabling the clock before the phy breaks enabling dsi again */
+       clk_disable_unprepare(dsi->tx_esc_clk);
+
+       return 0;
+}
+
+/*
+ * drm_bridge .disable callback: power down the DSI host and PHY, put
+ * every interface back into reset (reverse of the pre_enable/enable
+ * sequence), then drop the clocks and the runtime-PM reference.
+ *
+ * Note: the BYTE/ESC error strings were previously swapped relative to
+ * the reset being asserted; they now match their reset lines.
+ */
+static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       nwl_dsi_disable(dsi);
+
+       ret = reset_control_assert(dsi->rst_dpi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_byte);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_esc);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
+               return;
+       }
+       ret = reset_control_assert(dsi->rst_pclk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
+               return;
+       }
+
+       clk_disable_unprepare(dsi->core_clk);
+       clk_disable_unprepare(dsi->lcdif_clk);
+
+       pm_runtime_put(dsi->dev);
+}
+
+/*
+ * Compute the D-PHY configuration for @mode: start from the generic
+ * MIPI D-PHY defaults for the mode's pixel clock / bpp / lane count and
+ * substitute the actual LP clock rate from the tx_esc clock.
+ * Returns 0 or a negative error.
+ */
+static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+                                  const struct drm_display_mode *mode,
+                                  union phy_configure_opts *phy_opts)
+{
+       unsigned long rate;
+       int ret;
+
+       if (dsi->lanes < 1 || dsi->lanes > 4)
+               return -EINVAL;
+
+       /*
+        * So far the DPHY spec minimal timings work for both mixel
+        * dphy and nwl dsi host
+        */
+       ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
+               mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
+               &phy_opts->mipi_dphy);
+       if (ret < 0)
+               return ret;
+
+       rate = clk_get_rate(dsi->tx_esc_clk);
+       DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
+       phy_opts->mipi_dphy.lp_clk_rate = rate;
+
+       return 0;
+}
+
+/*
+ * drm_bridge .mode_fixup callback: force active-high H/V sync flags on
+ * the adjusted mode regardless of what the panel requested.
+ */
+static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+                                     const struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode)
+{
+       /* At least LCDIF + NWL needs active high sync */
+       adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+       adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+       return true;
+}
+
+/*
+ * drm_bridge .mode_valid callback: bound the requested bandwidth
+ * (pixel clock in kHz times bits per pixel) against per-lane limits.
+ * NOTE(review): the 15000000 / 80000 constants encode the supported
+ * per-lane rate range - confirm against the NWL host/PHY datasheet.
+ */
+static enum drm_mode_status
+nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+                         const struct drm_display_mode *mode)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+       if (mode->clock * bpp > 15000000 * dsi->lanes)
+               return MODE_CLOCK_HIGH;
+
+       if (mode->clock * bpp < 80000 * dsi->lanes)
+               return MODE_CLOCK_LOW;
+
+       return MODE_OK;
+}
+
+/*
+ * drm_bridge .mode_set callback: recompute the D-PHY parameters for the
+ * adjusted mode and cache both the PHY config and the mode for the
+ * subsequent pre_enable.  Skips the update when the HS clock rate is
+ * unchanged, since all other parameters derive from it.
+ */
+static void
+nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+                       const struct drm_display_mode *mode,
+                       const struct drm_display_mode *adjusted_mode)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       struct device *dev = dsi->dev;
+       union phy_configure_opts new_cfg;
+       unsigned long phy_ref_rate;
+       int ret;
+
+       ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
+       if (ret < 0)
+               return;
+
+       /*
+        * If hs clock is unchanged, we're all good - all parameters are
+        * derived from it atm.
+        */
+       if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
+               return;
+
+       phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
+       DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
+       /* Save the new desired phy config */
+       memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
+
+       memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+       drm_mode_debug_printmodeline(adjusted_mode);
+}
+
+/*
+ * drm_bridge .pre_enable callback: take a runtime-PM reference, enable
+ * the lcdif/core clocks and walk steps 1-3 of the DSI reset-out
+ * sequence (the final DPI deassert happens in .enable).
+ *
+ * NOTE(review): the clk_prepare_enable() failures return silently
+ * without logging or releasing the runtime-PM reference - worth
+ * revisiting.
+ */
+static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       pm_runtime_get_sync(dsi->dev);
+
+       if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+               return;
+       if (clk_prepare_enable(dsi->core_clk) < 0)
+               return;
+
+       /* Step 1 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_pclk);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+               return;
+       }
+
+       /* Step 2 from DSI reset-out instructions */
+       nwl_dsi_enable(dsi);
+
+       /* Step 3 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_esc);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+               return;
+       }
+       ret = reset_control_deassert(dsi->rst_byte);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+               return;
+       }
+}
+
+/*
+ * drm_bridge .enable callback: final step of the DSI reset-out
+ * sequence - release the DPI interface from reset.
+ */
+static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       int ret;
+
+       /* Step 5 from DSI reset-out instructions */
+       ret = reset_control_deassert(dsi->rst_dpi);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
+}
+
+/*
+ * drm_bridge .attach callback: locate the downstream panel or bridge on
+ * OF graph port 1 (wrapping a bare panel in a panel-bridge) and chain
+ * it after this bridge.  The driver still creates its own connector, so
+ * DRM_BRIDGE_ATTACH_NO_CONNECTOR is rejected.
+ */
+static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+       struct drm_bridge *panel_bridge;
+       struct drm_panel *panel;
+       int ret;
+
+       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+               DRM_ERROR("Fix bridge driver to make connector optional!");
+               return -EINVAL;
+       }
+
+       ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
+                                         &panel_bridge);
+       if (ret)
+               return ret;
+
+       if (panel) {
+               panel_bridge = drm_panel_bridge_add(panel);
+               if (IS_ERR(panel_bridge))
+                       return PTR_ERR(panel_bridge);
+       }
+       dsi->panel_bridge = panel_bridge;
+
+       /* Nothing downstream yet - retry once the panel/bridge probes. */
+       if (!dsi->panel_bridge)
+               return -EPROBE_DEFER;
+
+       return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+                                flags);
+}
+
+/*
+ * drm_bridge .detach callback: undo nwl_dsi_bridge_attach() by dropping
+ * the panel-bridge registered for OF graph port 1.
+ * (Reformatted: the declaration was fused onto the opening-brace line.)
+ */
+static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+{
+       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+
+       drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
+}
+
+/* Bridge callbacks wired into DRM via drm_bridge_add() in probe. */
+static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+       .pre_enable = nwl_dsi_bridge_pre_enable,
+       .enable     = nwl_dsi_bridge_enable,
+       .disable    = nwl_dsi_bridge_disable,
+       .mode_fixup = nwl_dsi_bridge_mode_fixup,
+       .mode_set   = nwl_dsi_bridge_mode_set,
+       .mode_valid = nwl_dsi_bridge_mode_valid,
+       .attach     = nwl_dsi_bridge_attach,
+       .detach     = nwl_dsi_bridge_detach,
+};
+
+/*
+ * Acquire all device-tree described resources: the D-PHY, the five
+ * clocks (lcdif, core, phy_ref, rx_esc, tx_esc), the input mux, the
+ * MMIO regmap, the IRQ and the four reset lines.  Everything is
+ * devm-managed; returns 0 or the first acquisition error.
+ *
+ * NOTE(review): only the PHY and mux lookups suppress the error print
+ * on -EPROBE_DEFER; the clock lookups log unconditionally, which can be
+ * noisy during deferred probing.
+ */
+static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+{
+       struct platform_device *pdev = to_platform_device(dsi->dev);
+       struct clk *clk;
+       void __iomem *base;
+       int ret;
+
+       dsi->phy = devm_phy_get(dsi->dev, "dphy");
+       if (IS_ERR(dsi->phy)) {
+               ret = PTR_ERR(dsi->phy);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
+               return ret;
+       }
+
+       clk = devm_clk_get(dsi->dev, "lcdif");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->lcdif_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "core");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->core_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "phy_ref");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->phy_ref_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "rx_esc");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->rx_esc_clk = clk;
+
+       clk = devm_clk_get(dsi->dev, "tx_esc");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
+                             ret);
+               return ret;
+       }
+       dsi->tx_esc_clk = clk;
+
+       /* Mux selecting LCDIF vs DCSS as the pixel source (see select_input). */
+       dsi->mux = devm_mux_control_get(dsi->dev, NULL);
+       if (IS_ERR(dsi->mux)) {
+               ret = PTR_ERR(dsi->mux);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
+               return ret;
+       }
+
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       dsi->regmap =
+               devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
+       if (IS_ERR(dsi->regmap)) {
+               ret = PTR_ERR(dsi->regmap);
+               DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
+                             ret);
+               return ret;
+       }
+
+       dsi->irq = platform_get_irq(pdev, 0);
+       if (dsi->irq < 0) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
+                             dsi->irq);
+               return dsi->irq;
+       }
+
+       dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
+       if (IS_ERR(dsi->rst_pclk)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
+                             PTR_ERR(dsi->rst_pclk));
+               return PTR_ERR(dsi->rst_pclk);
+       }
+       dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
+       if (IS_ERR(dsi->rst_byte)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
+                             PTR_ERR(dsi->rst_byte));
+               return PTR_ERR(dsi->rst_byte);
+       }
+       dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
+       if (IS_ERR(dsi->rst_esc)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
+                             PTR_ERR(dsi->rst_esc));
+               return PTR_ERR(dsi->rst_esc);
+       }
+       dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
+       if (IS_ERR(dsi->rst_dpi)) {
+               DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
+                             PTR_ERR(dsi->rst_dpi));
+               return PTR_ERR(dsi->rst_dpi);
+       }
+       return 0;
+}
+
+/*
+ * Pick the pixel source: if an OF-graph remote exists on the LCDIF
+ * endpoint use LCDIF, otherwise fall back to DCSS, and program the mux
+ * accordingly.  Returns 0 or a negative error.
+ */
+static int nwl_dsi_select_input(struct nwl_dsi *dsi)
+{
+       struct device_node *remote;
+       u32 use_dcss = 1;
+       int ret;
+
+       remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+                                         NWL_DSI_ENDPOINT_LCDIF);
+       if (remote) {
+               use_dcss = 0;
+       } else {
+               remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+                                                 NWL_DSI_ENDPOINT_DCSS);
+               if (!remote) {
+                       DRM_DEV_ERROR(dsi->dev,
+                                     "No valid input endpoint found\n");
+                       return -EINVAL;
+               }
+       }
+
+       DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
+                    (use_dcss) ? "DCSS" : "LCDIF");
+       ret = mux_control_try_select(dsi->mux, use_dcss);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
+
+       of_node_put(remote);
+       return ret;
+}
+
+/* Release the input mux selection taken in nwl_dsi_select_input(). */
+static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
+{
+       int ret;
+
+       ret = mux_control_deselect(dsi->mux);
+       if (ret < 0)
+               DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
+
+       return ret;
+}
+
+/* Input bus timing: the data-enable signal is active low. */
+static const struct drm_bridge_timings nwl_dsi_timings = {
+       .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+/* Device-tree compatibles handled by this driver. */
+static const struct of_device_id nwl_dsi_dt_ids[] = {
+       { .compatible = "fsl,imx8mq-nwl-dsi", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
+
+/*
+ * SoC-revision quirk table: i.MX8MQ rev 2.0 needs the E11418 HS-mode
+ * workaround applied in nwl_dsi_begin_transmission().
+ */
+static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
+       { .soc_id = "i.MX8MQ", .revision = "2.0",
+         .data = (void *)E11418_HS_MODE_QUIRK },
+       { /* sentinel. */ },
+};
+
+/*
+ * Probe: allocate driver state, parse DT resources, register the MIPI
+ * DSI host, apply SoC quirks, select the input source and expose the
+ * bridge.  Resources are devm-managed except the host registration,
+ * runtime-PM enable and the bridge, which are unwound on error/remove.
+ */
+static int nwl_dsi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       const struct soc_device_attribute *attr;
+       struct nwl_dsi *dsi;
+       int ret;
+
+       dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       dsi->dev = dev;
+
+       ret = nwl_dsi_parse_dt(dsi);
+       if (ret)
+               return ret;
+
+       ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
+                              dev_name(dev), dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
+                             ret);
+               return ret;
+       }
+
+       dsi->dsi_host.ops = &nwl_dsi_host_ops;
+       dsi->dsi_host.dev = dev;
+       ret = mipi_dsi_host_register(&dsi->dsi_host);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
+               return ret;
+       }
+
+       /* Optional SoC-revision specific quirks (nwl_dsi_quirks_match). */
+       attr = soc_device_match(nwl_dsi_quirks_match);
+       if (attr)
+               dsi->quirks = (uintptr_t)attr->data;
+
+       dsi->bridge.driver_private = dsi;
+       dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
+       dsi->bridge.of_node = dev->of_node;
+       dsi->bridge.timings = &nwl_dsi_timings;
+
+       dev_set_drvdata(dev, dsi);
+       pm_runtime_enable(dev);
+
+       ret = nwl_dsi_select_input(dsi);
+       if (ret < 0) {
+               /* Balance pm_runtime_enable() above before bailing out. */
+               pm_runtime_disable(dev);
+               mipi_dsi_host_unregister(&dsi->dsi_host);
+               return ret;
+       }
+
+       drm_bridge_add(&dsi->bridge);
+       return 0;
+}
+
+/*
+ * Remove: release the input mux, unregister the DSI host, drop the
+ * bridge and disable runtime PM - reversing nwl_dsi_probe().
+ */
+static int nwl_dsi_remove(struct platform_device *pdev)
+{
+       struct nwl_dsi *dsi = platform_get_drvdata(pdev);
+
+       nwl_dsi_deselect_input(dsi);
+       mipi_dsi_host_unregister(&dsi->dsi_host);
+       drm_bridge_remove(&dsi->bridge);
+       pm_runtime_disable(&pdev->dev);
+       return 0;
+}
+
+/* Platform driver glue and module metadata. */
+static struct platform_driver nwl_dsi_driver = {
+       .probe          = nwl_dsi_probe,
+       .remove         = nwl_dsi_remove,
+       .driver         = {
+               .of_match_table = nwl_dsi_dt_ids,
+               .name   = DRV_NAME,
+       },
+};
+
+module_platform_driver(nwl_dsi_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Purism SPC");
+MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
+MODULE_LICENSE("GPL"); /* GPLv2 or later */
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
new file mode 100644 (file)
index 0000000..a247a8a
--- /dev/null
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2019 Purism SPC
+ */
+#ifndef __NWL_DSI_H__
+#define __NWL_DSI_H__
+
+/* DSI HOST registers */
+#define NWL_DSI_CFG_NUM_LANES                  0x0
+#define NWL_DSI_CFG_NONCONTINUOUS_CLK          0x4
+#define NWL_DSI_CFG_T_PRE                      0x8
+#define NWL_DSI_CFG_T_POST                     0xc
+#define NWL_DSI_CFG_TX_GAP                     0x10
+#define NWL_DSI_CFG_AUTOINSERT_EOTP            0x14
+#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP      0x18
+#define NWL_DSI_CFG_HTX_TO_COUNT               0x1c
+#define NWL_DSI_CFG_LRX_H_TO_COUNT             0x20
+#define NWL_DSI_CFG_BTA_H_TO_COUNT             0x24
+#define NWL_DSI_CFG_TWAKEUP                    0x28
+#define NWL_DSI_CFG_STATUS_OUT                 0x2c
+#define NWL_DSI_RX_ERROR_STATUS                        0x30
+
+/* DSI DPI registers */
+#define NWL_DSI_PIXEL_PAYLOAD_SIZE             0x200
+#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL          0x204
+#define NWL_DSI_INTERFACE_COLOR_CODING         0x208
+#define NWL_DSI_PIXEL_FORMAT                   0x20c
+#define NWL_DSI_VSYNC_POLARITY                 0x210
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW      0
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH     BIT(1)
+
+#define NWL_DSI_HSYNC_POLARITY                 0x214
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW      0
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH     BIT(1)
+
+#define NWL_DSI_VIDEO_MODE                     0x218
+#define NWL_DSI_HFP                            0x21c
+#define NWL_DSI_HBP                            0x220
+#define NWL_DSI_HSA                            0x224
+#define NWL_DSI_ENABLE_MULT_PKTS               0x228
+#define NWL_DSI_VBP                            0x22c
+#define NWL_DSI_VFP                            0x230
+#define NWL_DSI_BLLP_MODE                      0x234
+#define NWL_DSI_USE_NULL_PKT_BLLP              0x238
+#define NWL_DSI_VACTIVE                                0x23c
+#define NWL_DSI_VC                             0x240
+
+/* DSI APB PKT control */
+#define NWL_DSI_TX_PAYLOAD                     0x280
+#define NWL_DSI_PKT_CONTROL                    0x284
+#define NWL_DSI_SEND_PACKET                    0x288
+#define NWL_DSI_PKT_STATUS                     0x28c
+#define NWL_DSI_PKT_FIFO_WR_LEVEL              0x290
+#define NWL_DSI_PKT_FIFO_RD_LEVEL              0x294
+#define NWL_DSI_RX_PAYLOAD                     0x298
+#define NWL_DSI_RX_PKT_HEADER                  0x29c
+
+/* DSI IRQ handling */
+#define NWL_DSI_IRQ_STATUS                     0x2a0
+#define NWL_DSI_SM_NOT_IDLE                    BIT(0)
+#define NWL_DSI_TX_PKT_DONE                    BIT(1)
+#define NWL_DSI_DPHY_DIRECTION                 BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW                  BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW                  BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW                  BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW                  BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD                        BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD       BIT(8)
+#define NWL_DSI_BTA_TIMEOUT                    BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT                  BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT                  BIT(31)
+
+#define NWL_DSI_IRQ_STATUS2                    0x2a4
+#define NWL_DSI_SINGLE_BIT_ECC_ERR             BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR              BIT(1)
+#define NWL_DSI_CRC_ERR                                BIT(2)
+
+#define NWL_DSI_IRQ_MASK                       0x2a8
+#define NWL_DSI_SM_NOT_IDLE_MASK               BIT(0)
+#define NWL_DSI_TX_PKT_DONE_MASK               BIT(1)
+#define NWL_DSI_DPHY_DIRECTION_MASK            BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW_MASK             BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW_MASK             BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW_MASK             BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW_MASK             BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD_MASK           BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK  BIT(8)
+#define NWL_DSI_BTA_TIMEOUT_MASK               BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT_MASK             BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT_MASK             BIT(31)
+
+#define NWL_DSI_IRQ_MASK2                      0x2ac
+#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK                BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK         BIT(1)
+#define NWL_DSI_CRC_ERR_MASK                   BIT(2)
+
+/*
+ * PKT_CONTROL format:
+ * [15: 0] - word count
+ * [17:16] - virtual channel
+ * [23:18] - data type
+ * [24]           - LP or HS select (0 - LP, 1 - HS)
+ * [25]           - perform BTA after packet is sent
+ * [26]           - perform BTA only, no packet tx
+ */
+#define NWL_DSI_WC(x)          FIELD_PREP(GENMASK(15, 0), (x))
+#define NWL_DSI_TX_VC(x)       FIELD_PREP(GENMASK(17, 16), (x))
+#define NWL_DSI_TX_DT(x)       FIELD_PREP(GENMASK(23, 18), (x))
+#define NWL_DSI_HS_SEL(x)      FIELD_PREP(GENMASK(24, 24), (x))
+#define NWL_DSI_BTA_TX(x)      FIELD_PREP(GENMASK(25, 25), (x))
+#define NWL_DSI_BTA_NO_TX(x)   FIELD_PREP(GENMASK(26, 26), (x))
+
+/*
+ * RX_PKT_HEADER format:
+ * [15: 0] - word count
+ * [21:16] - data type
+ * [23:22] - virtual channel
+ */
+#define NWL_DSI_RX_DT(x)       FIELD_GET(GENMASK(21, 16), (x))
+#define NWL_DSI_RX_VC(x)       FIELD_GET(GENMASK(23, 22), (x))
+
+/* DSI Video mode */
+#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES         0
+#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS     BIT(0)
+#define NWL_DSI_VM_BURST_MODE                          BIT(1)
+
+/* DPI color coding */
+#define NWL_DSI_DPI_16_BIT_565_PACKED  0
+#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
+#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
+#define NWL_DSI_DPI_18_BIT_PACKED      3
+#define NWL_DSI_DPI_18_BIT_ALIGNED     4
+#define NWL_DSI_DPI_24_BIT             5
+
+/* DPI Pixel format */
+#define NWL_DSI_PIXEL_FORMAT_16  0
+#define NWL_DSI_PIXEL_FORMAT_18  BIT(0)
+#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
+#define NWL_DSI_PIXEL_FORMAT_24  (BIT(0) | BIT(1))
+
+#endif /* __NWL_DSI_H__ */
index 8461ee8304ba75bb160b9cda422be3a9f2ad6a82..1e63ed6b18aa4f9b6013382696bb8b08f0ef5e9a 100644 (file)
@@ -166,7 +166,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
  *
  * The connector type is set to @panel->connector_type, which must be set to a
  * known type. Calling this function with a panel whose connector type is
- * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL).
  *
  * See devm_drm_panel_bridge_add() for an automatically managed version of this
  * function.
@@ -174,7 +174,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
 {
        if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        return drm_panel_bridge_add_typed(panel, panel->connector_type);
 }
@@ -265,7 +265,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
                                             struct drm_panel *panel)
 {
        if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        return devm_drm_panel_bridge_add_typed(dev, panel,
                                               panel->connector_type);
@@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
 
 /**
  * drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: The drm_bridge.
  *
  * drm_panel_bridge creates the connector.
  * This function gives external access to the connector.
index d3a53442d449a4a53ddc0f588bc57bd53c2146b8..4b099196afeba12815bf41cc00372cbd4e8fd297 100644 (file)
@@ -268,8 +268,6 @@ static int ps8640_probe(struct i2c_client *client)
        if (!panel)
                return -ENODEV;
 
-       panel->connector_type = DRM_MODE_CONNECTOR_eDP;
-
        ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
        if (IS_ERR(ps_bridge->panel_bridge))
                return PTR_ERR(ps_bridge->panel_bridge);
index f81f81b7051f6df165de89c5df8c5583bc14982d..b1258f0ed20551b85072138bf0ad3c9cb56fd3bc 100644 (file)
@@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx,
        ctx->supplies[3].supply = "cvcc12";
        ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
        if (ret) {
-               dev_err(ctx->dev, "regulator_bulk failed\n");
+               if (ret != -EPROBE_DEFER)
+                       dev_err(ctx->dev, "regulator_bulk failed\n");
                return ret;
        }
 
index 383b1073d7de43cf1afe41d8d611b79ea5be3591..30681398cfb0576af7698219aa0ec7693f72ad2f 100644 (file)
@@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
        { 0x6756, 0x78ab, 0x2000, 0x0200 }
 };
 
+static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
+       { 0x1b7c, 0x0000, 0x0000, 0x0020 },
+       { 0x0000, 0x1b7c, 0x0000, 0x0020 },
+       { 0x0000, 0x0000, 0x1b7c, 0x0020 }
+};
+
 struct hdmi_vmode {
        bool mdataenablepolarity;
 
@@ -109,6 +115,7 @@ struct hdmi_data_info {
        unsigned int pix_repet_factor;
        unsigned int hdcp_enable;
        struct hdmi_vmode video_mode;
+       bool rgb_limited_range;
 };
 
 struct dw_hdmi_i2c {
@@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
 
 static int is_color_space_conversion(struct dw_hdmi *hdmi)
 {
-       return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
+       struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+       bool is_input_rgb, is_output_rgb;
+
+       is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format);
+       is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format);
+
+       return (is_input_rgb != is_output_rgb) ||
+              (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range);
 }
 
 static int is_color_space_decimation(struct dw_hdmi *hdmi)
@@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi)
        return 0;
 }
 
+static bool is_csc_needed(struct dw_hdmi *hdmi)
+{
+       return is_color_space_conversion(hdmi) ||
+              is_color_space_decimation(hdmi) ||
+              is_color_space_interpolation(hdmi);
+}
+
 static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
 {
        const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+       bool is_input_rgb, is_output_rgb;
        unsigned i;
        u32 csc_scale = 1;
 
-       if (is_color_space_conversion(hdmi)) {
-               if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
-                       if (hdmi->hdmi_data.enc_out_encoding ==
-                                               V4L2_YCBCR_ENC_601)
-                               csc_coeff = &csc_coeff_rgb_out_eitu601;
-                       else
-                               csc_coeff = &csc_coeff_rgb_out_eitu709;
-               } else if (hdmi_bus_fmt_is_rgb(
-                                       hdmi->hdmi_data.enc_in_bus_format)) {
-                       if (hdmi->hdmi_data.enc_out_encoding ==
-                                               V4L2_YCBCR_ENC_601)
-                               csc_coeff = &csc_coeff_rgb_in_eitu601;
-                       else
-                               csc_coeff = &csc_coeff_rgb_in_eitu709;
-                       csc_scale = 0;
-               }
+       is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format);
+       is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format);
+
+       if (!is_input_rgb && is_output_rgb) {
+               if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+                       csc_coeff = &csc_coeff_rgb_out_eitu601;
+               else
+                       csc_coeff = &csc_coeff_rgb_out_eitu709;
+       } else if (is_input_rgb && !is_output_rgb) {
+               if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+                       csc_coeff = &csc_coeff_rgb_in_eitu601;
+               else
+                       csc_coeff = &csc_coeff_rgb_in_eitu709;
+               csc_scale = 0;
+       } else if (is_input_rgb && is_output_rgb &&
+                  hdmi->hdmi_data.rgb_limited_range) {
+               csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
        }
 
        /* The CSC registers are sequential, alternating MSB then LSB */
@@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
        drm_hdmi_avi_infoframe_from_display_mode(&frame,
                                                 &hdmi->connector, mode);
 
+       if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+               drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector,
+                                                  mode,
+                                                  hdmi->hdmi_data.rgb_limited_range ?
+                                                  HDMI_QUANTIZATION_RANGE_LIMITED :
+                                                  HDMI_QUANTIZATION_RANGE_FULL);
+       } else {
+               frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+               frame.ycc_quantization_range =
+                       HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+       }
+
        if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
                frame.colorspace = HDMI_COLORSPACE_YUV444;
        else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
@@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
        }
 
-       frame.scan_mode = HDMI_SCAN_MODE_NONE;
-
        /*
         * The Designware IP uses a different byte format from standard
         * AVI info frames, though generally the bits are in the correct
@@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
        hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
 
        /* Enable csc path */
-       if (is_color_space_conversion(hdmi)) {
+       if (is_csc_needed(hdmi)) {
                hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
                hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
-       }
 
-       /* Enable color space conversion if needed */
-       if (is_color_space_conversion(hdmi))
                hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH,
                            HDMI_MC_FLOWCTRL);
-       else
+       } else {
+               hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+               hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+
                hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS,
                            HDMI_MC_FLOWCTRL);
+       }
 }
 
 /* Workaround to clear the overflow condition */
@@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
        if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
                hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
+       hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
+               drm_default_rgb_quant_range(mode) ==
+               HDMI_QUANTIZATION_RANGE_LIMITED;
+
        hdmi->hdmi_data.pix_repet_factor = 0;
        hdmi->hdmi_data.hdcp_enable = 0;
        hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
index 1b39e8d37834a2e095944a65623ff3833834a26c..6650fe4cfc20f79585eb1591f3437425dc033d5a 100644 (file)
@@ -178,6 +178,8 @@ static int tc358768_clear_error(struct tc358768_priv *priv)
 
 static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
 {
+       /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+       int tmpval = val;
        size_t count = 2;
 
        if (priv->error)
@@ -187,7 +189,7 @@ static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
        if (reg < 0x100 || reg >= 0x600)
                count = 1;
 
-       priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+       priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count);
 }
 
 static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
deleted file mode 100644 (file)
index c6bbd98..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_CIRRUS_QEMU
-       tristate "Cirrus driver for QEMU emulated device"
-       depends on DRM && PCI && MMU
-       select DRM_KMS_HELPER
-       select DRM_GEM_SHMEM_HELPER
-       help
-        This is a KMS driver for emulated cirrus device in qemu.
-        It is *NOT* intended for real cirrus devices. This requires
-        the modesetting userspace X.org driver.
-
-        Cirrus is obsolete, the hardware was designed in the 90ies
-        and can't keep up with todays needs.  More background:
-        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-
-        Better alternatives are:
-          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
-          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
-          - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
deleted file mode 100644 (file)
index 0c1ed3f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
index 9ccfbf213d72609038bd1d79c85d285e5cba719d..965173fd0ac2549f4b419dd5814f3bd97052df9b 100644 (file)
@@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = {
        {"state", drm_state_info, 0},
 };
 
-int drm_atomic_debugfs_init(struct drm_minor *minor)
+void drm_atomic_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_atomic_debugfs_list,
-                       ARRAY_SIZE(drm_atomic_debugfs_list),
-                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_atomic_debugfs_list,
+                                ARRAY_SIZE(drm_atomic_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 531b876d0ed83c7ce9e4419c309b446ada63f9d7..800ac39f3213df4f55304be6bbe5282f9ee3f768 100644 (file)
@@ -135,6 +135,7 @@ static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
                }
        }
 
+       fpriv->was_master = (ret == 0);
        return ret;
 }
 
@@ -174,17 +175,77 @@ out_err:
        return ret;
 }
 
+/*
+ * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
+ * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
+ * from becoming master and/or failing to release it.
+ *
+ * At the same time, the first client (for a given VT) is _always_ master.
+ * Thus in order for the ioctls to succeed, one had to _explicitly_ run the
+ * application as root or flip the setuid bit.
+ *
+ * If the CAP_SYS_ADMIN was missing, no other client could become master...
+ * EVER :-( Leading to a) the graphics session dying badly or b) a completely
+ * locked session.
+ *
+ *
+ * At some point systemd-logind was introduced to orchestrate and delegate
+ * master as applicable. It does so by opening the fd and passing it to users
+ * while in itself logind a) does the set/drop master per users' request and
+ * b) implicitly drops master on VT switch.
+ *
+ * Even though logind looks like the future, there are a few issues:
+ *  - some platforms don't have equivalent (Android, CrOS, some BSDs) so
+ * root is required _solely_ for SET/DROP MASTER.
+ *  - applications may not be updated to use it,
+ *  - any client which fails to drop master* can DoS the application using
+ * logind, to a varying degree.
+ *
+ * * Either due to missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
+ *
+ *
+ * Here we implement the next best thing:
+ *  - ensure the logind style of fd passing works unchanged, and
+ *  - allow a client to drop/set master, iff it is/was master at a given point
+ * in time.
+ *
+ * Note: DROP_MASTER cannot be free for all, as an arbitrator user could:
+ *  - DoS/crash the arbitrator - details would be implementation specific
+ *  - open the node, become master implicitly and cause issues
+ *
+ * As a result this fixes the following when using root-less build w/o logind
+ * - startx
+ * - weston
+ * - various compositors based on wlroots
+ */
+static int
+drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+{
+       if (file_priv->pid == task_pid(current) && file_priv->was_master)
+               return 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       return 0;
+}
+
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
        int ret = 0;
 
        mutex_lock(&dev->master_mutex);
+
+       ret = drm_master_check_perm(dev, file_priv);
+       if (ret)
+               goto out_unlock;
+
        if (drm_is_current_master(file_priv))
                goto out_unlock;
 
        if (dev->master) {
-               ret = -EINVAL;
+               ret = -EBUSY;
                goto out_unlock;
        }
 
@@ -224,6 +285,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
        int ret = -EINVAL;
 
        mutex_lock(&dev->master_mutex);
+
+       ret = drm_master_check_perm(dev, file_priv);
+       if (ret)
+               goto out_unlock;
+
+       ret = -EINVAL;
        if (!drm_is_current_master(file_priv))
                goto out_unlock;
 
index 121481f6aa7149c668b77c3bb0326dcda2fe9cef..f1dcad96f34170f912f397b7e212165c615ee279 100644 (file)
  *     are underneath planes with higher Z position values. Two planes with the
  *     same Z position value have undefined ordering. Note that the Z position
  *     value can also be immutable, to inform userspace about the hard-coded
- *     stacking of planes, see drm_plane_create_zpos_immutable_property().
+ *     stacking of planes, see drm_plane_create_zpos_immutable_property(). If
+ *     any plane has a zpos property (either mutable or immutable), then all
+ *     planes shall have a zpos property.
  *
  * pixel blend mode:
  *     Pixel blend mode is set up with drm_plane_create_blend_mode_property().
  *              plane does not expose the "alpha" property, then this is
  *              assumed to be 1.0
  *
+ * IN_FORMATS:
+ *     Blob property which contains the set of buffer format and modifier
+ *     pairs supported by this plane. The blob is a drm_format_modifier_blob
+ *     struct. Without this property the plane doesn't support buffers with
+ *     modifiers. Userspace cannot change this property.
+ *
  * Note that all the property extensions described here apply either to the
  * plane or the CRTC (e.g. for the background color, which currently is not
  * exposed and assumed to be black).
@@ -338,10 +346,10 @@ EXPORT_SYMBOL(drm_rotation_simplify);
  * should be set to 0 and max to maximal number of planes for given crtc - 1.
  *
  * If zpos of some planes cannot be changed (like fixed background or
- * cursor/topmost planes), driver should adjust min/max values and assign those
- * planes immutable zpos property with lower or higher values (for more
+ * cursor/topmost planes), drivers shall adjust the min/max values and assign
+ * those planes immutable zpos properties with lower or higher values (for more
  * information, see drm_plane_create_zpos_immutable_property() function). In such
- * case driver should also assign proper initial zpos values for all planes in
+ * case drivers shall also assign proper initial zpos values for all planes in
  * its plane_reset() callback, so the planes will be always sorted properly.
  *
  * See also drm_atomic_normalize_zpos().
index dcabf56983336d1055d59e81fa12d2cfc9ff2b9a..ef26ac57f0394c118ee1cc6bf953e79ee8cb831d 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/nospec.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_legacy.h"
index 6b0c6ef8b9b396de2d5d26127f3862352a6e87f5..8cb93f5209a4bc524ce73962f4623c13c6f48735 100644 (file)
@@ -457,10 +457,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = {
        { "internal_clients", drm_client_debugfs_internal_clients, 0 },
 };
 
-int drm_client_debugfs_init(struct drm_minor *minor)
+void drm_client_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_client_debugfs_list,
-                                       ARRAY_SIZE(drm_client_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_client_debugfs_list,
+                                ARRAY_SIZE(drm_client_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 644f0ad1067176824dafdaccb3d85a7da83247e0..b1099e1251a265984dc18e2a9aaa020c326344a6 100644 (file)
@@ -1970,6 +1970,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
        else
                drm_reset_display_info(connector);
 
+       drm_update_tile_info(connector, edid);
+
        drm_object_property_set_value(&connector->base,
                                      dev->mode_config.non_desktop_property,
                                      connector->display_info.non_desktop);
@@ -2392,7 +2394,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group);
  * tile group or NULL if not found.
  */
 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
-                                              char topology[8])
+                                              const char topology[8])
 {
        struct drm_tile_group *tg;
        int id;
@@ -2422,7 +2424,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
  * new tile group or NULL.
  */
 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
-                                                 char topology[8])
+                                                 const char topology[8])
 {
        struct drm_tile_group *tg;
        int ret;
index 16f2413403aa2fa01bf1df2f4a89fea6c36c4000..da96b2f64d7e4366235131750c926f77973d23b1 100644 (file)
@@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
 /* drm_mode_config.c */
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
+void drm_mode_config_validate(struct drm_device *dev);
 
 /* drm_modes.c */
 const char *drm_get_mode_status_name(enum drm_mode_status status);
@@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 /* drm_atomic.c */
 #ifdef CONFIG_DEBUG_FS
 struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
+void drm_atomic_debugfs_init(struct drm_minor *minor);
 #endif
 
 int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
@@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
 void drm_reset_display_info(struct drm_connector *connector);
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
+void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
index 4e673d318503c6f3685e4467e720b4f991c43e00..2bea221307037a1329c13eea997b7527043720e4 100644 (file)
@@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = {
  * &struct drm_info_list in the given root directory. These files will be removed
  * automatically on drm_debugfs_cleanup().
  */
-int drm_debugfs_create_files(const struct drm_info_list *files, int count,
-                            struct dentry *root, struct drm_minor *minor)
+void drm_debugfs_create_files(const struct drm_info_list *files, int count,
+                             struct dentry *root, struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
        struct drm_info_node *tmp;
@@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
                list_add(&tmp->list, &minor->debugfs_list);
                mutex_unlock(&minor->debugfs_lock);
        }
-       return 0;
 }
 EXPORT_SYMBOL(drm_debugfs_create_files);
 
@@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 {
        struct drm_device *dev = minor->dev;
        char name[64];
-       int ret;
 
        INIT_LIST_HEAD(&minor->debugfs_list);
        mutex_init(&minor->debugfs_lock);
        sprintf(name, "%d", minor_id);
        minor->debugfs_root = debugfs_create_dir(name, root);
 
-       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
-                                      minor->debugfs_root, minor);
-       if (ret) {
-               debugfs_remove(minor->debugfs_root);
-               minor->debugfs_root = NULL;
-               DRM_ERROR("Failed to create core drm debugfs files\n");
-               return ret;
-       }
+       drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
 
        if (drm_drv_uses_atomic_modeset(dev)) {
-               ret = drm_atomic_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create atomic debugfs files\n");
-                       return ret;
-               }
+               drm_atomic_debugfs_init(minor);
        }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_framebuffer_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create framebuffer debugfs file\n");
-                       return ret;
-               }
+               drm_framebuffer_debugfs_init(minor);
 
-               ret = drm_client_debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("Failed to create client debugfs file\n");
-                       return ret;
-               }
+               drm_client_debugfs_init(minor);
        }
 
-       if (dev->driver->debugfs_init) {
-               ret = dev->driver->debugfs_init(minor);
-               if (ret) {
-                       DRM_ERROR("DRM: Driver failed to initialize "
-                                 "/sys/kernel/debug/dri.\n");
-                       return ret;
-               }
-       }
+       if (dev->driver->debugfs_init)
+               dev->driver->debugfs_init(minor);
+
        return 0;
 }
 
index a7add55a85b43030cc4e5fea56e626df17c9cc85..d07ba54ec94548227ac4679dc509d54ed8c743d4 100644 (file)
@@ -34,9 +34,9 @@
  */
 
 #include <linux/export.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_legacy.h"
index c6fbe6e6bc9dcf7807d5422e621d3adbd1da5641..19c99dddcb992c515dcf585a788ace22048887d0 100644 (file)
@@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
        { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
        /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
        { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+       /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
+       { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
 };
 
 #undef OUI
@@ -1313,6 +1315,7 @@ static const struct edid_quirk edid_quirk_list[] = {
        { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
        { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
        { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+       { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
 };
 
 #undef MFG
@@ -1533,3 +1536,271 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
        return num_bpc;
 }
 EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+
+/**
+ * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+                               struct drm_dp_phy_test_params *data)
+{
+       int err;
+       u8 rate, lanes;
+
+       err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+       if (err < 0)
+               return err;
+       data->link_rate = drm_dp_bw_code_to_link_rate(rate);
+
+       err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+       if (err < 0)
+               return err;
+       data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
+
+       if (lanes & DP_ENHANCED_FRAME_CAP)
+               data->enhanced_frame_cap = true;
+
+       err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+       if (err < 0)
+               return err;
+
+       switch (data->phy_pattern) {
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+               err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+                                      &data->custom80, sizeof(data->custom80));
+               if (err < 0)
+                       return err;
+
+               break;
+       case DP_PHY_TEST_PATTERN_CP2520:
+               err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+                                      &data->hbr2_reset,
+                                      sizeof(data->hbr2_reset));
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
+
+/**
+ * drm_dp_set_phy_test_pattern() - set the pattern to the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+                               struct drm_dp_phy_test_params *data, u8 dp_rev)
+{
+       int err, i;
+       u8 link_config[2];
+       u8 test_pattern;
+
+       link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
+       link_config[1] = data->num_lanes;
+       if (data->enhanced_frame_cap)
+               link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+       err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
+       if (err < 0)
+               return err;
+
+       test_pattern = data->phy_pattern;
+       if (dp_rev < 0x12) {
+               test_pattern = (test_pattern << 2) &
+                              DP_LINK_QUAL_PATTERN_11_MASK;
+               err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
+                                        test_pattern);
+               if (err < 0)
+                       return err;
+       } else {
+               for (i = 0; i < data->num_lanes; i++) {
+                       err = drm_dp_dpcd_writeb(aux,
+                                                DP_LINK_QUAL_LANE0_SET + i,
+                                                test_pattern);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
+
+static const char *dp_pixelformat_get_name(enum dp_pixelformat pixelformat)
+{
+       if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+               return "Invalid";
+
+       switch (pixelformat) {
+       case DP_PIXELFORMAT_RGB:
+               return "RGB";
+       case DP_PIXELFORMAT_YUV444:
+               return "YUV444";
+       case DP_PIXELFORMAT_YUV422:
+               return "YUV422";
+       case DP_PIXELFORMAT_YUV420:
+               return "YUV420";
+       case DP_PIXELFORMAT_Y_ONLY:
+               return "Y_ONLY";
+       case DP_PIXELFORMAT_RAW:
+               return "RAW";
+       default:
+               return "Reserved";
+       }
+}
+
+static const char *dp_colorimetry_get_name(enum dp_pixelformat pixelformat,
+                                          enum dp_colorimetry colorimetry)
+{
+       if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+               return "Invalid";
+
+       switch (colorimetry) {
+       case DP_COLORIMETRY_DEFAULT:
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "sRGB";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "BT.601";
+               case DP_PIXELFORMAT_Y_ONLY:
+                       return "DICOM PS3.14";
+               case DP_PIXELFORMAT_RAW:
+                       return "Custom Color Profile";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_RGB_WIDE_FIXED: /* and DP_COLORIMETRY_BT709_YCC */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "Wide Fixed";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "BT.709";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_RGB_WIDE_FLOAT: /* and DP_COLORIMETRY_XVYCC_601 */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "Wide Float";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "xvYCC 601";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_OPRGB: /* and DP_COLORIMETRY_XVYCC_709 */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "OpRGB";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "xvYCC 709";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_DCI_P3_RGB: /* and DP_COLORIMETRY_SYCC_601 */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "DCI-P3";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "sYCC 601";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_RGB_CUSTOM: /* and DP_COLORIMETRY_OPYCC_601 */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "Custom Profile";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "OpYCC 601";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_BT2020_RGB: /* and DP_COLORIMETRY_BT2020_CYCC */
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_RGB:
+                       return "BT.2020 RGB";
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "BT.2020 CYCC";
+               default:
+                       return "Reserved";
+               }
+       case DP_COLORIMETRY_BT2020_YCC:
+               switch (pixelformat) {
+               case DP_PIXELFORMAT_YUV444:
+               case DP_PIXELFORMAT_YUV422:
+               case DP_PIXELFORMAT_YUV420:
+                       return "BT.2020 YCC";
+               default:
+                       return "Reserved";
+               }
+       default:
+               return "Invalid";
+       }
+}
+
+static const char *dp_dynamic_range_get_name(enum dp_dynamic_range dynamic_range)
+{
+       switch (dynamic_range) {
+       case DP_DYNAMIC_RANGE_VESA:
+               return "VESA range";
+       case DP_DYNAMIC_RANGE_CTA:
+               return "CTA range";
+       default:
+               return "Invalid";
+       }
+}
+
+static const char *dp_content_type_get_name(enum dp_content_type content_type)
+{
+       switch (content_type) {
+       case DP_CONTENT_TYPE_NOT_DEFINED:
+               return "Not defined";
+       case DP_CONTENT_TYPE_GRAPHICS:
+               return "Graphics";
+       case DP_CONTENT_TYPE_PHOTO:
+               return "Photo";
+       case DP_CONTENT_TYPE_VIDEO:
+               return "Video";
+       case DP_CONTENT_TYPE_GAME:
+               return "Game";
+       default:
+               return "Reserved";
+       }
+}
+
+void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
+                       const struct drm_dp_vsc_sdp *vsc)
+{
+#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
+       DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+                  vsc->revision, vsc->length);
+       DP_SDP_LOG("    pixelformat: %s\n",
+                  dp_pixelformat_get_name(vsc->pixelformat));
+       DP_SDP_LOG("    colorimetry: %s\n",
+                  dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
+       DP_SDP_LOG("    bpc: %u\n", vsc->bpc);
+       DP_SDP_LOG("    dynamic range: %s\n",
+                  dp_dynamic_range_get_name(vsc->dynamic_range));
+       DP_SDP_LOG("    content type: %s\n",
+                  dp_content_type_get_name(vsc->content_type));
+#undef DP_SDP_LOG
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
index 35b62c5d18b40dfe2a28b2c0fd26ce437605e2fb..1e26b89628f98c7d4e4b3bed44078c6c088165fb 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/iopoll.h>
 
 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
 #include <linux/stacktrace.h>
@@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *
        raw->cur_len = idx;
 }
 
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
-                                     u8 *replybuf, u8 replybuflen, bool hdr)
+static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+                                         struct drm_dp_sideband_msg_hdr *hdr,
+                                         u8 hdrlen)
 {
-       int ret;
-       u8 crc4;
+       /*
+        * ignore out-of-order messages or messages that are part of a
+        * failed transaction
+        */
+       if (!hdr->somt && !msg->have_somt)
+               return false;
 
-       if (hdr) {
-               u8 hdrlen;
-               struct drm_dp_sideband_msg_hdr recv_hdr;
-               ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
-               if (ret == false) {
-                       print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
-                       return false;
-               }
+       /* get length contained in this portion */
+       msg->curchunk_idx = 0;
+       msg->curchunk_len = hdr->msg_len;
+       msg->curchunk_hdrlen = hdrlen;
 
-               /*
-                * ignore out-of-order messages or messages that are part of a
-                * failed transaction
-                */
-               if (!recv_hdr.somt && !msg->have_somt)
-                       return false;
+       /* we have already gotten an somt - don't bother parsing */
+       if (hdr->somt && msg->have_somt)
+               return false;
 
-               /* get length contained in this portion */
-               msg->curchunk_len = recv_hdr.msg_len;
-               msg->curchunk_hdrlen = hdrlen;
+       if (hdr->somt) {
+               memcpy(&msg->initial_hdr, hdr,
+                      sizeof(struct drm_dp_sideband_msg_hdr));
+               msg->have_somt = true;
+       }
+       if (hdr->eomt)
+               msg->have_eomt = true;
 
-               /* we have already gotten an somt - don't bother parsing */
-               if (recv_hdr.somt && msg->have_somt)
-                       return false;
+       return true;
+}
 
-               if (recv_hdr.somt) {
-                       memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
-                       msg->have_somt = true;
-               }
-               if (recv_hdr.eomt)
-                       msg->have_eomt = true;
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+                                          u8 *replybuf, u8 replybuflen)
+{
+       u8 crc4;
 
-               /* copy the bytes for the remainder of this header chunk */
-               msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
-               memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
-       } else {
-               memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
-               msg->curchunk_idx += replybuflen;
-       }
+       memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+       msg->curchunk_idx += replybuflen;
 
        if (msg->curchunk_idx >= msg->curchunk_len) {
                /* do CRC */
@@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
        drm_dp_encode_sideband_req(&req, msg);
 }
 
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
 {
        struct drm_dp_sideband_msg_req_body req;
 
        req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
        drm_dp_encode_sideband_req(&req, msg);
-       return 0;
 }
 
 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -1203,16 +1197,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 
                /* remove from q */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
-                   txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+                   txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
                        list_del(&txmsg->next);
-               }
-
-               if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
-                   txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
-                       mstb->tx_slots[txmsg->seqno] = NULL;
-               }
-               mgr->is_waiting_for_dwn_reply = false;
-
        }
 out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -2691,22 +2677,6 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
        struct drm_dp_mst_branch *mstb = txmsg->dst;
        u8 req_type;
 
-       /* both msg slots are full */
-       if (txmsg->seqno == -1) {
-               if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
-                       DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
-                       return -EAGAIN;
-               }
-               if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
-                       txmsg->seqno = mstb->last_seqno;
-                       mstb->last_seqno ^= 1;
-               } else if (mstb->tx_slots[0] == NULL)
-                       txmsg->seqno = 0;
-               else
-                       txmsg->seqno = 1;
-               mstb->tx_slots[txmsg->seqno] = txmsg;
-       }
-
        req_type = txmsg->msg[0] & 0x7f;
        if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
                req_type == DP_RESOURCE_STATUS_NOTIFY)
@@ -2718,7 +2688,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
        hdr->lcr = mstb->lct - 1;
        if (mstb->lct > 1)
                memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
-       hdr->seqno = txmsg->seqno;
+
        return 0;
 }
 /*
@@ -2733,15 +2703,15 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
        int len, space, idx, tosend;
        int ret;
 
+       if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+               return 0;
+
        memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
 
-       if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
-               txmsg->seqno = -1;
+       if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
                txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
-       }
 
-       /* make hdr from dst mst - for replies use seqno
-          otherwise assign one */
+       /* make hdr from dst mst */
        ret = set_hdr_from_dst_qlock(&hdr, txmsg);
        if (ret < 0)
                return ret;
@@ -2794,42 +2764,17 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        if (list_empty(&mgr->tx_msg_downq))
                return;
 
-       txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+       txmsg = list_first_entry(&mgr->tx_msg_downq,
+                                struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, false);
-       if (ret == 1) {
-               /* txmsg is sent it should be in the slots now */
-               mgr->is_waiting_for_dwn_reply = true;
-               list_del(&txmsg->next);
-       } else if (ret) {
+       if (ret < 0) {
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-               mgr->is_waiting_for_dwn_reply = false;
                list_del(&txmsg->next);
-               if (txmsg->seqno != -1)
-                       txmsg->dst->tx_slots[txmsg->seqno] = NULL;
                txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
                wake_up_all(&mgr->tx_waitq);
        }
 }
 
-/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
-                                      struct drm_dp_sideband_msg_tx *txmsg)
-{
-       int ret;
-
-       /* construct a chunk from the first msg in the tx_msg queue */
-       ret = process_single_tx_qlock(mgr, txmsg, true);
-
-       if (ret != 1)
-               DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-
-       if (txmsg->seqno != -1) {
-               WARN_ON((unsigned int)txmsg->seqno >
-                       ARRAY_SIZE(txmsg->dst->tx_slots));
-               txmsg->dst->tx_slots[txmsg->seqno] = NULL;
-       }
-}
-
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_sideband_msg_tx *txmsg)
 {
@@ -2842,8 +2787,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
 
-       if (list_is_singular(&mgr->tx_msg_downq) &&
-           !mgr->is_waiting_for_dwn_reply)
+       if (list_is_singular(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -3467,7 +3411,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
 
 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_branch *mstb,
-                                   int req_type, int seqno, bool broadcast)
+                                   int req_type, bool broadcast)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
 
@@ -3476,13 +3420,11 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
                return -ENOMEM;
 
        txmsg->dst = mstb;
-       txmsg->seqno = seqno;
        drm_dp_encode_up_ack_reply(txmsg, req_type);
 
        mutex_lock(&mgr->qlock);
-
-       process_single_up_tx_qlock(mgr, txmsg);
-
+       /* construct a chunk from the first msg in the tx_msg queue */
+       process_single_tx_qlock(mgr, txmsg, true);
        mutex_unlock(&mgr->qlock);
 
        kfree(txmsg);
@@ -3707,31 +3649,63 @@ out_fail:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+                     struct drm_dp_mst_branch **mstb)
 {
        int len;
        u8 replyblock[32];
        int replylen, curreply;
        int ret;
-       struct drm_dp_sideband_msg_rx *msg;
-       int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
-       msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+       u8 hdrlen;
+       struct drm_dp_sideband_msg_hdr hdr;
+       struct drm_dp_sideband_msg_rx *msg =
+               up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+       int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+                          DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+       if (!up)
+               *mstb = NULL;
 
        len = min(mgr->max_dpcd_transaction_bytes, 16);
-       ret = drm_dp_dpcd_read(mgr->aux, basereg,
-                              replyblock, len);
+       ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
        if (ret != len) {
                DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
                return false;
        }
-       ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+       ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+       if (ret == false) {
+               print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+                              1, replyblock, len, false);
+               DRM_DEBUG_KMS("ERROR: failed header\n");
+               return false;
+       }
+
+       if (!up) {
+               /* Caller is responsible for giving back this reference */
+               *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+               if (!*mstb) {
+                       DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+                                     hdr.lct);
+                       return false;
+               }
+       }
+
+       if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+               DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+                             replyblock[0]);
+               return false;
+       }
+
+       replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+       ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
        if (!ret) {
                DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
                return false;
        }
-       replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
-       replylen -= len;
+       replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
        curreply = len;
        while (replylen > 0) {
                len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -3743,7 +3717,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
                        return false;
                }
 
-               ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+               ret = drm_dp_sideband_append_payload(msg, replyblock, len);
                if (!ret) {
                        DRM_DEBUG_KMS("failed to build sideband msg\n");
                        return false;
@@ -3758,67 +3732,60 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
-       struct drm_dp_mst_branch *mstb;
-       struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
-       int slot = -1;
-
-       if (!drm_dp_get_one_sb_msg(mgr, false))
-               goto clear_down_rep_recv;
+       struct drm_dp_mst_branch *mstb = NULL;
+       struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
 
-       if (!mgr->down_rep_recv.have_eomt)
-               return 0;
+       if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+               goto out;
 
-       mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
-       if (!mstb) {
-               DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
-                             hdr->lct);
-               goto clear_down_rep_recv;
-       }
+       /* Multi-packet message transmission, don't clear the reply */
+       if (!msg->have_eomt)
+               goto out;
 
        /* find the message */
-       slot = hdr->seqno;
        mutex_lock(&mgr->qlock);
-       txmsg = mstb->tx_slots[slot];
-       /* remove from slots */
+       txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+                                        struct drm_dp_sideband_msg_tx, next);
        mutex_unlock(&mgr->qlock);
 
-       if (!txmsg) {
+       /* Were we actually expecting a response, and from this mstb? */
+       if (!txmsg || txmsg->dst != mstb) {
+               struct drm_dp_sideband_msg_hdr *hdr;
+               hdr = &msg->initial_hdr;
                DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
                              mstb, hdr->seqno, hdr->lct, hdr->rad[0],
-                             mgr->down_rep_recv.msg[0]);
-               goto no_msg;
+                             msg->msg[0]);
+               goto out_clear_reply;
        }
 
-       drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+       drm_dp_sideband_parse_reply(msg, &txmsg->reply);
 
-       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
                DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
                              txmsg->reply.req_type,
                              drm_dp_mst_req_type_str(txmsg->reply.req_type),
                              txmsg->reply.u.nak.reason,
                              drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
                              txmsg->reply.u.nak.nak_data);
+       }
 
-       memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+       memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
        drm_dp_mst_topology_put_mstb(mstb);
 
        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
-       mstb->tx_slots[slot] = NULL;
-       mgr->is_waiting_for_dwn_reply = false;
+       list_del(&txmsg->next);
        mutex_unlock(&mgr->qlock);
 
        wake_up_all(&mgr->tx_waitq);
 
        return 0;
 
-no_msg:
-       drm_dp_mst_topology_put_mstb(mstb);
-clear_down_rep_recv:
-       mutex_lock(&mgr->qlock);
-       mgr->is_waiting_for_dwn_reply = false;
-       mutex_unlock(&mgr->qlock);
-       memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out_clear_reply:
+       memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+       if (mstb)
+               drm_dp_mst_topology_put_mstb(mstb);
 
        return 0;
 }
@@ -3894,11 +3861,9 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
 
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
-       struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
        struct drm_dp_pending_up_req *up_req;
-       bool seqno;
 
-       if (!drm_dp_get_one_sb_msg(mgr, true))
+       if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
                goto out;
 
        if (!mgr->up_req_recv.have_eomt)
@@ -3911,7 +3876,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
        }
        INIT_LIST_HEAD(&up_req->next);
 
-       seqno = hdr->seqno;
        drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
 
        if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
@@ -3923,7 +3887,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
        }
 
        drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
-                                seqno, false);
+                                false);
 
        if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
                const struct drm_dp_connection_status_notify *conn_stat =
@@ -3945,7 +3909,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                              res_stat->available_pbn);
        }
 
-       up_req->hdr = *hdr;
+       up_req->hdr = mgr->up_req_recv.initial_hdr;
        mutex_lock(&mgr->up_req_lock);
        list_add_tail(&up_req->next, &mgr->up_req_list);
        mutex_unlock(&mgr->up_req_lock);
@@ -4050,27 +4014,6 @@ out:
 }
 EXPORT_SYMBOL(drm_dp_mst_detect_port);
 
-/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_dp_mst_port *port)
-{
-       bool ret = false;
-
-       port = drm_dp_mst_topology_get_port_validated(mgr, port);
-       if (!port)
-               return ret;
-       ret = port->has_audio;
-       drm_dp_mst_topology_put_port(port);
-       return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
 /**
  * drm_dp_mst_get_edid() - get EDID for an MST port
  * @connector: toplevel connector to get EDID for
@@ -4448,42 +4391,58 @@ fail:
        return ret;
 }
 
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+       int ret;
+       u8 status;
+
+       ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+       if (ret < 0)
+               return ret;
+
+       return status;
+}
 
 /**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
  * @mgr: manager to use
  *
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating its payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
  */
 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
 {
-       u8 status;
-       int ret;
-       int count = 0;
-
-       do {
-               ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
-               if (ret < 0) {
-                       DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
-                       goto fail;
-               }
-
-               if (status & DP_PAYLOAD_ACT_HANDLED)
-                       break;
-               count++;
-               udelay(100);
-
-       } while (count < 30);
-
-       if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
-               DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
-               ret = -EINVAL;
-               goto fail;
+       /*
+        * There doesn't seem to be any recommended retry count or timeout in
+        * the MST specification. Since some hubs have been observed to take
+        * over 1 second to update their payload allocations under certain
+        * conditions, we use a rather large timeout value.
+        */
+       const int timeout_ms = 3000;
+       int ret, status;
+
+       ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+                                status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+                                200, timeout_ms * USEC_PER_MSEC);
+       if (ret < 0 && status >= 0) {
+               DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+                         timeout_ms, status);
+               return -EINVAL;
+       } else if (status < 0) {
+               /*
+                * Failure here isn't unexpected - the hub may have
+                * just been unplugged
+                */
+               DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+                             status);
+               return status;
        }
+
        return 0;
-fail:
-       return ret;
 }
 EXPORT_SYMBOL(drm_dp_check_act_status);
 
@@ -4674,28 +4633,18 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+       if (!list_empty(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
 
-static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
 {
-       if (!port->connector)
-               return;
-
-       if (port->mgr->cbs->destroy_connector) {
-               port->mgr->cbs->destroy_connector(port->mgr, port->connector);
-       } else {
+       if (port->connector) {
                drm_connector_unregister(port->connector);
                drm_connector_put(port->connector);
        }
-}
-
-static inline void
-drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
-{
-       drm_dp_destroy_connector(port);
 
        drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
@@ -4705,26 +4654,25 @@ static inline void
 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
 {
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
-       struct drm_dp_mst_port *port, *tmp;
+       struct drm_dp_mst_port *port, *port_tmp;
+       struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
        bool wake_tx = false;
 
        mutex_lock(&mgr->lock);
-       list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+       list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
                list_del(&port->next);
                drm_dp_mst_topology_put_port(port);
        }
        mutex_unlock(&mgr->lock);
 
-       /* drop any tx slots msg */
+       /* drop any tx slot msg */
        mutex_lock(&mstb->mgr->qlock);
-       if (mstb->tx_slots[0]) {
-               mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-               mstb->tx_slots[0] = NULL;
-               wake_tx = true;
-       }
-       if (mstb->tx_slots[1]) {
-               mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-               mstb->tx_slots[1] = NULL;
+       list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
+               if (txmsg->dst != mstb)
+                       continue;
+
+               txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+               list_del(&txmsg->next);
                wake_tx = true;
        }
        mutex_unlock(&mstb->mgr->qlock);
index 7b1a628d1f6e3277657a5582c1f196fa1a7fe851..bc38322f306ea24b7b32032f5d1bedcc80354a16 100644 (file)
@@ -39,6 +39,7 @@
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mode_object.h>
 #include <drm/drm_print.h>
 
@@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
        }
 }
 
+static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+{
+       struct drm_minor *minor = data;
+       unsigned long flags;
+
+       WARN_ON(dev != minor->dev);
+
+       put_device(minor->kdev);
+
+       spin_lock_irqsave(&drm_minor_lock, flags);
+       idr_remove(&drm_minors_idr, minor->index);
+       spin_unlock_irqrestore(&drm_minor_lock, flags);
+}
+
 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
 {
        struct drm_minor *minor;
        unsigned long flags;
        int r;
 
-       minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+       minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;
 
@@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
        idr_preload_end();
 
        if (r < 0)
-               goto err_free;
+               return r;
 
        minor->index = r;
 
+       r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+       if (r)
+               return r;
+
        minor->kdev = drm_sysfs_minor_alloc(minor);
-       if (IS_ERR(minor->kdev)) {
-               r = PTR_ERR(minor->kdev);
-               goto err_index;
-       }
+       if (IS_ERR(minor->kdev))
+               return PTR_ERR(minor->kdev);
 
        *drm_minor_get_slot(dev, type) = minor;
        return 0;
-
-err_index:
-       spin_lock_irqsave(&drm_minor_lock, flags);
-       idr_remove(&drm_minors_idr, minor->index);
-       spin_unlock_irqrestore(&drm_minor_lock, flags);
-err_free:
-       kfree(minor);
-       return r;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
-       struct drm_minor **slot, *minor;
-       unsigned long flags;
-
-       slot = drm_minor_get_slot(dev, type);
-       minor = *slot;
-       if (!minor)
-               return;
-
-       put_device(minor->kdev);
-
-       spin_lock_irqsave(&drm_minor_lock, flags);
-       idr_remove(&drm_minors_idr, minor->index);
-       spin_unlock_irqrestore(&drm_minor_lock, flags);
-
-       kfree(minor);
-       *slot = NULL;
 }
 
 static int drm_minor_register(struct drm_device *dev, unsigned int type)
@@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor)
  * any other resources allocated at device initialization and drop the driver's
  * reference to &drm_device using drm_dev_put().
  *
- * Note that the lifetime rules for &drm_device instance has still a lot of
- * historical baggage. Hence use the reference counting provided by
- * drm_dev_get() and drm_dev_put() only carefully.
+ * Note that any allocation or resource which is visible to userspace must be
+ * released only when the final drm_dev_put() is called, and not when the
+ * driver is unbound from the underlying physical struct &device. Best to use
+ * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
+ * related functions.
+ *
+ * devres managed resources like devm_kmalloc() can only be used for resources
+ * directly related to the underlying hardware device, and only used in code
+ * paths fully protected by drm_dev_enter() and drm_dev_exit().
  *
  * Display driver example
  * ~~~~~~~~~~~~~~~~~~~~~~
  *
  * The following example shows a typical structure of a DRM display driver.
  * The example focus on the probe() function and the other functions that is
- * almost always present and serves as a demonstration of devm_drm_dev_init()
- * usage with its accompanying drm_driver->release callback.
+ * almost always present and serves as a demonstration of devm_drm_dev_init().
  *
  * .. code-block:: c
  *
@@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor)
  *             struct clk *pclk;
  *     };
  *
- *     static void driver_drm_release(struct drm_device *drm)
- *     {
- *             struct driver_device *priv = container_of(...);
- *
- *             drm_mode_config_cleanup(drm);
- *             drm_dev_fini(drm);
- *             kfree(priv->userspace_facing);
- *             kfree(priv);
- *     }
- *
  *     static struct drm_driver driver_drm_driver = {
  *             [...]
- *             .release = driver_drm_release,
  *     };
  *
  *     static int driver_probe(struct platform_device *pdev)
@@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor)
  *
  *             ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
  *             if (ret) {
- *                     kfree(drm);
+ *                     kfree(priv);
  *                     return ret;
  *             }
+ *             drmm_add_final_kfree(drm, priv);
  *
- *             drm_mode_config_init(drm);
+ *             ret = drmm_mode_config_init(drm);
+ *             if (ret)
+ *                     return ret;
  *
- *             priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ *             priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
  *             if (!priv->userspace_facing)
  *                     return -ENOMEM;
  *
@@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode)
  *    used.
  */
 
+static void drm_dev_init_release(struct drm_device *dev, void *res)
+{
+       drm_legacy_ctxbitmap_cleanup(dev);
+       drm_legacy_remove_map_hash(dev);
+       drm_fs_inode_free(dev->anon_inode);
+
+       put_device(dev->dev);
+       /* Prevent use-after-free in drm_managed_release when debugging is
+        * enabled. Slightly awkward, but can't really be helped. */
+       dev->dev = NULL;
+       mutex_destroy(&dev->master_mutex);
+       mutex_destroy(&dev->clientlist_mutex);
+       mutex_destroy(&dev->filelist_mutex);
+       mutex_destroy(&dev->struct_mutex);
+       drm_legacy_destroy_members(dev);
+}
+
 /**
  * drm_dev_init - Initialise new DRM device
  * @dev: DRM device
@@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode)
  * arbitrary offset, you must supply a &drm_driver.release callback and control
  * the finalization explicitly.
  *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
+ *
  * RETURNS:
  * 0 on success, or error code on failure.
  */
@@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev,
        dev->dev = get_device(parent);
        dev->driver = driver;
 
+       INIT_LIST_HEAD(&dev->managed.resources);
+       spin_lock_init(&dev->managed.lock);
+
        /* no per-device feature limits by default */
        dev->driver_features = ~0u;
 
@@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev,
        mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->master_mutex);
 
+       ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+       if (ret)
+               return ret;
+
        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
-               goto err_free;
+               goto err;
        }
 
        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
-                       goto err_minors;
+                       goto err;
        }
 
        ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
        if (ret)
-               goto err_minors;
+               goto err;
 
        ret = drm_legacy_create_map_hash(dev);
        if (ret)
-               goto err_minors;
+               goto err;
 
        drm_legacy_ctxbitmap_init(dev);
 
@@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev,
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
-                       goto err_ctxbitmap;
+                       goto err;
                }
        }
 
        ret = drm_dev_set_unique(dev, dev_name(parent));
        if (ret)
-               goto err_setunique;
+               goto err;
 
        return 0;
 
-err_setunique:
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_destroy(dev);
-err_ctxbitmap:
-       drm_legacy_ctxbitmap_cleanup(dev);
-       drm_legacy_remove_map_hash(dev);
-err_minors:
-       drm_minor_free(dev, DRM_MINOR_PRIMARY);
-       drm_minor_free(dev, DRM_MINOR_RENDER);
-       drm_fs_inode_free(dev->anon_inode);
-err_free:
-       put_device(dev->dev);
-       mutex_destroy(&dev->master_mutex);
-       mutex_destroy(&dev->clientlist_mutex);
-       mutex_destroy(&dev->filelist_mutex);
-       mutex_destroy(&dev->struct_mutex);
-       drm_legacy_destroy_members(dev);
+err:
+       drm_managed_release(dev);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_dev_init);
@@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data)
  * @driver: DRM driver
  *
  * Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put(). You must supply a
- * &drm_driver.release callback to control the finalization explicitly.
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
  *
  * RETURNS:
  * 0 on success, or error code on failure.
@@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent,
 {
        int ret;
 
-       if (WARN_ON(!driver->release))
-               return -EINVAL;
-
        ret = drm_dev_init(dev, driver, parent);
        if (ret)
                return ret;
@@ -741,42 +739,28 @@ int devm_drm_dev_init(struct device *parent,
 }
 EXPORT_SYMBOL(devm_drm_dev_init);
 
-/**
- * drm_dev_fini - Finalize a dead DRM device
- * @dev: DRM device
- *
- * Finalize a dead DRM device. This is the converse to drm_dev_init() and
- * frees up all data allocated by it. All driver private data should be
- * finalized first. Note that this function does not free the @dev, that is
- * left to the caller.
- *
- * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
- * from a &drm_driver.release callback.
- */
-void drm_dev_fini(struct drm_device *dev)
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+                          size_t size, size_t offset)
 {
-       drm_vblank_cleanup(dev);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_destroy(dev);
-
-       drm_legacy_ctxbitmap_cleanup(dev);
-       drm_legacy_remove_map_hash(dev);
-       drm_fs_inode_free(dev->anon_inode);
+       void *container;
+       struct drm_device *drm;
+       int ret;
 
-       drm_minor_free(dev, DRM_MINOR_PRIMARY);
-       drm_minor_free(dev, DRM_MINOR_RENDER);
+       container = kzalloc(size, GFP_KERNEL);
+       if (!container)
+               return ERR_PTR(-ENOMEM);
 
-       put_device(dev->dev);
+       drm = container + offset;
+       ret = devm_drm_dev_init(parent, drm, driver);
+       if (ret) {
+               kfree(container);
+               return ERR_PTR(ret);
+       }
+       drmm_add_final_kfree(drm, container);
 
-       mutex_destroy(&dev->master_mutex);
-       mutex_destroy(&dev->clientlist_mutex);
-       mutex_destroy(&dev->filelist_mutex);
-       mutex_destroy(&dev->struct_mutex);
-       drm_legacy_destroy_members(dev);
-       kfree(dev->unique);
+       return container;
 }
-EXPORT_SYMBOL(drm_dev_fini);
+EXPORT_SYMBOL(__devm_drm_dev_alloc);
 
 /**
  * drm_dev_alloc - Allocate new DRM device
@@ -816,6 +800,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                return ERR_PTR(ret);
        }
 
+       drmm_add_final_kfree(dev, dev);
+
        return dev;
 }
 EXPORT_SYMBOL(drm_dev_alloc);
@@ -824,12 +810,13 @@ static void drm_dev_release(struct kref *ref)
 {
        struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
-       if (dev->driver->release) {
+       if (dev->driver->release)
                dev->driver->release(dev);
-       } else {
-               drm_dev_fini(dev);
-               kfree(dev);
-       }
+
+       drm_managed_release(dev);
+
+       if (dev->managed.final_kfree)
+               kfree(dev->managed.final_kfree);
 }
 
 /**
@@ -946,6 +933,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        struct drm_driver *driver = dev->driver;
        int ret;
 
+       if (!driver->load)
+               drm_mode_config_validate(dev);
+
+       WARN_ON(!dev->managed.final_kfree);
+
        if (drm_dev_needs_global_mutex(dev))
                mutex_lock(&drm_global_mutex);
 
@@ -1046,8 +1038,8 @@ EXPORT_SYMBOL(drm_dev_unregister);
  */
 int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
-       kfree(dev->unique);
-       dev->unique = kstrdup(name, GFP_KERNEL);
+       drmm_kfree(dev, dev->unique);
+       dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
 
        return dev->unique ? 0 : -ENOMEM;
 }
index d96e3ce3e5359ef5d96438cfdd74500246fff621..fed653f13c266ce8ad3f748b1d1971660ea9893f 100644 (file)
@@ -1584,8 +1584,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
 MODULE_PARM_DESC(edid_fixup,
                 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
-static void drm_get_displayid(struct drm_connector *connector,
-                             struct edid *edid);
 static int validate_displayid(u8 *displayid, int length, int idx);
 
 static int drm_edid_block_checksum(const u8 *raw_edid)
@@ -2019,18 +2017,13 @@ EXPORT_SYMBOL(drm_probe_ddc);
 struct edid *drm_get_edid(struct drm_connector *connector,
                          struct i2c_adapter *adapter)
 {
-       struct edid *edid;
-
        if (connector->force == DRM_FORCE_OFF)
                return NULL;
 
        if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
                return NULL;
 
-       edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
-       if (edid)
-               drm_get_displayid(connector, edid);
-       return edid;
+       return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -2388,6 +2381,14 @@ bad_std_timing(u8 a, u8 b)
               (a == 0x20 && b == 0x20);
 }
 
+static int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+       if (mode->htotal <= 0)
+               return 0;
+
+       return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
+}
+
 /**
  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
  * @connector: connector of for the EDID block
@@ -3213,16 +3214,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
 }
 
 
-static u8 *drm_find_displayid_extension(const struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid,
+                                       int *length, int *idx)
 {
-       return drm_find_edid_extension(edid, DISPLAYID_EXT);
+       u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT);
+       struct displayid_hdr *base;
+       int ret;
+
+       if (!displayid)
+               return NULL;
+
+       /* EDID extensions block checksum isn't for us */
+       *length = EDID_LENGTH - 1;
+       *idx = 1;
+
+       ret = validate_displayid(displayid, *length, *idx);
+       if (ret)
+               return NULL;
+
+       base = (struct displayid_hdr *)&displayid[*idx];
+       *length = *idx + sizeof(*base) + base->bytes;
+
+       return displayid;
 }
 
 static u8 *drm_find_cea_extension(const struct edid *edid)
 {
-       int ret;
-       int idx = 1;
-       int length = EDID_LENGTH;
+       int length, idx;
        struct displayid_block *block;
        u8 *cea;
        u8 *displayid;
@@ -3233,14 +3251,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
                return cea;
 
        /* CEA blocks can also be found embedded in a DisplayID block */
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid)
                return NULL;
 
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return NULL;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                if (block->tag == DATA_BLOCK_CTA) {
@@ -5085,7 +5099,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
-       int i;
+       int i, dispid_length;
        u8 csum = 0;
        struct displayid_hdr *base;
 
@@ -5094,15 +5108,18 @@ static int validate_displayid(u8 *displayid, int length, int idx)
        DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
                      base->rev, base->bytes, base->prod_id, base->ext_count);
 
-       if (base->bytes + 5 > length - idx)
+       /* +1 for DispID checksum */
+       dispid_length = sizeof(*base) + base->bytes + 1;
+       if (dispid_length > length - idx)
                return -EINVAL;
-       for (i = idx; i <= base->bytes + 5; i++) {
-               csum += displayid[i];
-       }
+
+       for (i = 0; i < dispid_length; i++)
+               csum += displayid[idx + i];
        if (csum) {
                DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -5181,20 +5198,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
                                        struct edid *edid)
 {
        u8 *displayid;
-       int ret;
-       int idx = 1;
-       int length = EDID_LENGTH;
+       int length, idx;
        struct displayid_block *block;
        int num_modes = 0;
 
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid)
                return 0;
 
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return 0;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                switch (block->tag) {
@@ -5783,9 +5794,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
 
 static int drm_parse_tiled_block(struct drm_connector *connector,
-                                struct displayid_block *block)
+                                const struct displayid_block *block)
 {
-       struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+       const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
        u16 w, h;
        u8 tile_v_loc, tile_h_loc;
        u8 num_v_tile, num_h_tile;
@@ -5836,22 +5847,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector,
        return 0;
 }
 
-static int drm_parse_display_id(struct drm_connector *connector,
-                               u8 *displayid, int length,
-                               bool is_edid_extension)
+static int drm_displayid_parse_tiled(struct drm_connector *connector,
+                                    const u8 *displayid, int length, int idx)
 {
-       /* if this is an EDID extension the first byte will be 0x70 */
-       int idx = 0;
-       struct displayid_block *block;
+       const struct displayid_block *block;
        int ret;
 
-       if (is_edid_extension)
-               idx = 1;
-
-       ret = validate_displayid(displayid, length, idx);
-       if (ret)
-               return ret;
-
        idx += sizeof(struct displayid_hdr);
        for_each_displayid_db(displayid, block, idx, length) {
                DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
@@ -5863,12 +5864,6 @@ static int drm_parse_display_id(struct drm_connector *connector,
                        if (ret)
                                return ret;
                        break;
-               case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
-                       /* handled in mode gathering code. */
-                       break;
-               case DATA_BLOCK_CTA:
-                       /* handled in the cea parser code. */
-                       break;
                default:
                        DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
                        break;
@@ -5877,19 +5872,21 @@ static int drm_parse_display_id(struct drm_connector *connector,
        return 0;
 }
 
-static void drm_get_displayid(struct drm_connector *connector,
-                             struct edid *edid)
+void drm_update_tile_info(struct drm_connector *connector,
+                         const struct edid *edid)
 {
-       void *displayid = NULL;
+       const void *displayid = NULL;
+       int length, idx;
        int ret;
+
        connector->has_tile = false;
-       displayid = drm_find_displayid_extension(edid);
+       displayid = drm_find_displayid_extension(edid, &length, &idx);
        if (!displayid) {
                /* drop reference to any tile group we had */
                goto out_drop_ref;
        }
 
-       ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+       ret = drm_displayid_parse_tiled(connector, displayid, length, idx);
        if (ret < 0)
                goto out_drop_ref;
        if (!connector->has_tile)
index 9801c0333eca29e937a1877525976bd572378547..cb2349ad338d953bbdd56896dc7ee08e1ab6d3a3 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * drm kms/fb cma (contiguous memory allocator) helper functions
  *
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
  *   Author: Lars-Peter Clausen <lars@metafoo.de>
  *
  * Based on udl_fbdev.c
index a9771de4d17e64816964c8a7244a46216814cd0e..02fc240268729a07cf00dcd4bf587dbc3ef52654 100644 (file)
@@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
        if (ret)
                goto err_release;
 
+       /*
+        * TODO: We really should be smarter here and alloc an aperture
+        * for each IORESOURCE_MEM resource helper->dev->dev has and also
+        * init the ranges of the apertures based on the resources.
+        * Note some drivers currently count on there being only 1 empty
+        * aperture and fill this themselves, these will need to be dealt
+        * with somehow when fixing this.
+        */
        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
@@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
  *
  * This function sets up generic fbdev emulation for drivers that supports
  * dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver has
+ * registered the new DRM device with drm_dev_register().
  *
  * Restore, hotplug events and teardown are all taken care of. Drivers that do
  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
  * Setup will be retried on the next hotplug event.
  *
  * The fbdev is destroyed by drm_dev_unregister().
- *
- * Returns:
- * Zero on success or negative error code on failure.
  */
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev,
+                            unsigned int preferred_bpp)
 {
        struct drm_fb_helper *fb_helper;
        int ret;
 
-       WARN(dev->fb_helper, "fb_helper is already set!\n");
+       drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+       drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
 
        if (!drm_fbdev_emulation)
-               return 0;
+               return;
 
        fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
-       if (!fb_helper)
-               return -ENOMEM;
+       if (!fb_helper) {
+               drm_err(dev, "Failed to allocate fb_helper\n");
+               return;
+       }
 
        ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
        if (ret) {
                kfree(fb_helper);
                drm_err(dev, "Failed to register client: %d\n", ret);
-               return ret;
+               return;
        }
 
        if (!preferred_bpp)
@@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
                drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
 
        drm_client_register(&fb_helper->client);
-
-       return 0;
 }
 EXPORT_SYMBOL(drm_fbdev_generic_setup);
 
index 6a1f6c802415518319b1393c9d954a82f7af231e..2f12b8c1d01c13ec8d97304beeb51fad42e0e04c 100644 (file)
@@ -610,7 +610,8 @@ put_back_event:
                                file_priv->event_space -= length;
                                list_add(&e->link, &file_priv->event_list);
                                spin_unlock_irq(&dev->event_lock);
-                               wake_up_interruptible(&file_priv->event_wait);
+                               wake_up_interruptible_poll(&file_priv->event_wait,
+                                       EPOLLIN | EPOLLRDNORM);
                                break;
                        }
 
@@ -806,7 +807,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
        list_del(&e->pending_link);
        list_add_tail(&e->link,
                      &e->file_priv->event_list);
-       wake_up_interruptible(&e->file_priv->event_wait);
+       wake_up_interruptible_poll(&e->file_priv->event_wait,
+               EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL(drm_send_event_locked);
 
index 57ac94ce9b9eefc3615f1399b9662a1e1f88827a..0375b3d7f8d0fd5ef57d85e3c48a539c6865ffe6 100644 (file)
@@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
        { "framebuffer", drm_framebuffer_info, 0 },
 };
 
-int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+void drm_framebuffer_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
-                               ARRAY_SIZE(drm_framebuffer_debugfs_list),
-                               minor->debugfs_root, minor);
+       drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+                                ARRAY_SIZE(drm_framebuffer_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
index 37627d06fb0609e4dc9dcab6b33f981a280087b9..7bf628e130232e69a2ba0674ea2843f4e17ebd6a 100644 (file)
@@ -44,6 +44,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vma_manager.h>
 
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+       drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
 /**
  * drm_gem_init - Initialize the GEM device fields
  * @dev: drm_devic structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);
 
-       vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+       vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+                                         GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
 
-       return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
-       drm_vma_offset_manager_destroy(dev->vma_offset_manager);
-       kfree(dev->vma_offset_manager);
-       dev->vma_offset_manager = NULL;
+       return drmm_add_action(dev, drm_gem_init_release, NULL);
 }
 
 /**
@@ -432,7 +431,7 @@ err_unref:
  * drm_gem_handle_create - create a gem handle for an object
  * @file_priv: drm file-private structure to register the handle for
  * @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
  *
  * Create a handle for this object. This adds a handle reference to the object,
  * which includes a regular reference count. Callers will likely want to
index 3a7ace19a9021c1dfd7d761e0244725e7c77b429..ccc2c71fa49140915928335c5da8e5460d337a9f 100644 (file)
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#define AFBC_HEADER_SIZE               16
+#define AFBC_TH_LAYOUT_ALIGNMENT       8
+#define AFBC_HDR_ALIGN                 64
+#define AFBC_SUPERBLOCK_PIXELS         256
+#define AFBC_SUPERBLOCK_ALIGNMENT      128
+#define AFBC_TH_BODY_START_ALIGNMENT   4096
+
 /**
  * DOC: overview
  *
@@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
 
-static struct drm_framebuffer *
-drm_gem_fb_alloc(struct drm_device *dev,
+static int
+drm_gem_fb_init(struct drm_device *dev,
+                struct drm_framebuffer *fb,
                 const struct drm_mode_fb_cmd2 *mode_cmd,
                 struct drm_gem_object **obj, unsigned int num_planes,
                 const struct drm_framebuffer_funcs *funcs)
 {
-       struct drm_framebuffer *fb;
        int ret, i;
 
-       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
-       if (!fb)
-               return ERR_PTR(-ENOMEM);
-
        drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
        for (i = 0; i < num_planes; i++)
                fb->obj[i] = obj[i];
 
        ret = drm_framebuffer_init(dev, fb, funcs);
-       if (ret) {
+       if (ret)
                drm_err(dev, "Failed to init framebuffer: %d\n", ret);
-               kfree(fb);
-               return ERR_PTR(ret);
-       }
 
-       return fb;
+       return ret;
 }
 
 /**
@@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_fb_create_handle);
 
 /**
- * drm_gem_fb_create_with_funcs() - Helper function for the
- *                                  &drm_mode_config_funcs.fb_create
- *                                  callback
+ * drm_gem_fb_init_with_funcs() - Helper function for implementing
+ *                               &drm_mode_config_funcs.fb_create
+ *                               callback in cases when the driver
+ *                               allocates a subclass of
+ *                               struct drm_framebuffer
  * @dev: DRM device
+ * @fb: framebuffer object
  * @file: DRM file that holds the GEM handle(s) backing the framebuffer
  * @mode_cmd: Metadata from the userspace framebuffer creation request
  * @funcs: vtable to be used for the new framebuffer object
@@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
  * This function can be used to set &drm_framebuffer_funcs for drivers that need
  * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
  * change &drm_framebuffer_funcs. The function does buffer size validation.
+ * The buffer size validation is for a general case, though, so users should
+ * pay attention to the checks being appropriate for them or, at least,
+ * non-conflicting.
  *
  * Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ * Zero or a negative error code.
  */
-struct drm_framebuffer *
-drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
-                            const struct drm_mode_fb_cmd2 *mode_cmd,
-                            const struct drm_framebuffer_funcs *funcs)
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+                              struct drm_framebuffer *fb,
+                              struct drm_file *file,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              const struct drm_framebuffer_funcs *funcs)
 {
        const struct drm_format_info *info;
        struct drm_gem_object *objs[4];
-       struct drm_framebuffer *fb;
        int ret, i;
 
        info = drm_get_format_info(dev, mode_cmd);
        if (!info)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        for (i = 0; i < info->num_planes; i++) {
                unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -175,19 +181,55 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
                }
        }
 
-       fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
-       if (IS_ERR(fb)) {
-               ret = PTR_ERR(fb);
+       ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+       if (ret)
                goto err_gem_object_put;
-       }
 
-       return fb;
+       return 0;
 
 err_gem_object_put:
        for (i--; i >= 0; i--)
                drm_gem_object_put_unlocked(objs[i]);
 
-       return ERR_PTR(ret);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
+
+/**
+ * drm_gem_fb_create_with_funcs() - Helper function for the
+ *                                  &drm_mode_config_funcs.fb_create
+ *                                  callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+                            const struct drm_mode_fb_cmd2 *mode_cmd,
+                            const struct drm_framebuffer_funcs *funcs)
+{
+       struct drm_framebuffer *fb;
+       int ret;
+
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+       if (ret) {
+               kfree(fb);
+               return ERR_PTR(ret);
+       }
+
+       return fb;
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
 
@@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
 
+static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+                                 const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       const struct drm_format_info *info;
+
+       info = drm_get_format_info(dev, mode_cmd);
+
+       /* use whatever a driver has set */
+       if (info->cpp[0])
+               return info->cpp[0] * 8;
+
+       /* guess otherwise */
+       switch (info->format) {
+       case DRM_FORMAT_YUV420_8BIT:
+               return 12;
+       case DRM_FORMAT_YUV420_10BIT:
+               return 15;
+       case DRM_FORMAT_VUY101010:
+               return 30;
+       default:
+               break;
+       }
+
+       /* all attempts failed */
+       return 0;
+}
+
+static int drm_gem_afbc_min_size(struct drm_device *dev,
+                                const struct drm_mode_fb_cmd2 *mode_cmd,
+                                struct drm_afbc_framebuffer *afbc_fb)
+{
+       __u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
+       /* remove bpp when all users properly encode cpp in drm_format_info */
+       __u32 bpp;
+
+       switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+               afbc_fb->block_width = 16;
+               afbc_fb->block_height = 16;
+               break;
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+               afbc_fb->block_width = 32;
+               afbc_fb->block_height = 8;
+               break;
+       /* no user exists yet - fall through */
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+       case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+       default:
+               drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
+                           mode_cmd->modifier[0]
+                           & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+               return -EINVAL;
+       }
+
+       /* tiled header afbc */
+       w_alignment = afbc_fb->block_width;
+       h_alignment = afbc_fb->block_height;
+       hdr_alignment = AFBC_HDR_ALIGN;
+       if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
+               w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+               h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+               hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
+       }
+
+       afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
+       afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
+       afbc_fb->offset = mode_cmd->offsets[0];
+
+       bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+       if (!bpp) {
+               drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
+               return -EINVAL;
+       }
+
+       n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
+                  / AFBC_SUPERBLOCK_PIXELS;
+       afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
+       afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
+                                              AFBC_SUPERBLOCK_ALIGNMENT);
+
+       return 0;
+}
+
+/**
+ * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
+ *                         fill and validate all the afbc-specific
+ *                         struct drm_afbc_framebuffer members
+ *
+ * @dev: DRM device
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @afbc_fb: afbc-specific framebuffer structure to be filled in
+ *           and validated by this function
+ *
+ * This function can be used by drivers which support afbc to complete
+ * the preparation of struct drm_afbc_framebuffer. It must be called after
+ * allocating the said struct and calling drm_gem_fb_init_with_funcs().
+ * It is the caller's responsibility to put afbc_fb->base.obj objects in case
+ * the call is unsuccessful.
+ *
+ * Returns:
+ * Zero on success or a negative error value on failure.
+ */
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+                        const struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct drm_afbc_framebuffer *afbc_fb)
+{
+       const struct drm_format_info *info;
+       struct drm_gem_object **objs;
+       int ret;
+
+       objs = afbc_fb->base.obj;
+       info = drm_get_format_info(dev, mode_cmd);
+       if (!info)
+               return -EINVAL;
+
+       ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+       if (ret < 0)
+               return ret;
+
+       if (objs[0]->size < afbc_fb->afbc_size)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
+
 /**
  * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
  * @plane: Plane
index 92a11bb42365da6d2793a8e2d673bae7446b8cd0..8b2d5c945c95cf4f5772532ef74bf71ad760d81c 100644 (file)
@@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <linux/module.h>
+
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_mode.h>
@@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 /**
  * DOC: overview
  *
- * This library provides a GEM buffer object that is backed by video RAM
- * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM (VRAM). It can be used for
+ * framebuffer devices with dedicated memory.
  *
  * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. The rsp.
- * buffer object is provided by &struct drm_gem_vram_object.
+ * manager for simple framebuffer devices with dedicated video memory. GEM
+ * VRAM buffer objects are either placed in the video memory or remain evicted
+ * to system memory.
+ *
+ * With the GEM interface userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffer are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm. Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ *     struct file_operations fops = {
+ *             .owner = THIS_MODULE,
+ *             DRM_VRAM_MM_FILE_OPERATIONS
+ *     };
+ *     struct drm_driver drv = {
+ *             .driver_feature = DRM_ ... ,
+ *             .fops = &fops,
+ *             DRM_GEM_VRAM_DRIVER
+ *     };
+ *
+ *     int init_drm_driver()
+ *     {
+ *             struct drm_device *dev;
+ *             uint64_t vram_base;
+ *             unsigned long vram_size;
+ *             int ret;
+ *
+ *             // setup device, vram base and size
+ *             // ...
+ *
+ *             ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ *             if (ret)
+ *                     return ret;
+ *             return 0;
+ *     }
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ *     void fini_drm_driver()
+ *     {
+ *             struct drm_device *dev = ...;
+ *
+ *             drm_vram_helper_release_mm(dev);
+ *     }
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, set
+ * the cursor overlay's image for a mouse cursor, or use it as input to the
+ * hardware's drawing engine.
+ *
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
  */
 
 /*
@@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
  * @plane:     a DRM plane
  * @new_state: the plane's new state
  *
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ * During plane updates, this function sets the plane's fence and
+ * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
  *
  * Returns:
  *     0 on success, or
@@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
                        goto err_drm_gem_vram_unpin;
        }
 
+       ret = drm_gem_fb_prepare_fb(plane, new_state);
+       if (ret)
+               goto err_drm_gem_vram_unpin;
+
        return 0;
 
 err_drm_gem_vram_unpin:
@@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = {
  * struct drm_vram_mm
  */
 
-#if defined(CONFIG_DEBUG_FS)
 static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
 static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
        { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
 };
-#endif
 
 /**
  * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
  *
  * @minor: drm minor device.
  *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
  */
-int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+void drm_vram_mm_debugfs_init(struct drm_minor *minor)
 {
-       int ret = 0;
-
-#if defined(CONFIG_DEBUG_FS)
-       ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
-                                      ARRAY_SIZE(drm_vram_mm_debugfs_list),
-                                      minor->debugfs_root, minor);
-#endif
-       return ret;
+       drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+                                ARRAY_SIZE(drm_vram_mm_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
 
@@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev,
        return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
 }
 EXPORT_SYMBOL(drm_vram_helper_mode_valid);
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
index 5714a78365ac6e14c716fa0836acad2a18b3f3a3..2470a352730b505e2698030bf6aee3734f5a33b0 100644 (file)
@@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
 
+/* drm_managed.c */
+void drm_managed_release(struct drm_device *dev);
+
 /* drm_vblank.c */
 void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
-void drm_vblank_cleanup(struct drm_device *dev);
 
 /* IOCTLS */
 int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
@@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 /* drm_gem.c */
 struct drm_gem_object;
 int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
                               struct drm_gem_object *obj,
                               u32 *handlep);
@@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 /* drm_framebuffer.c */
 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
                                const struct drm_framebuffer *fb);
-int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+void drm_framebuffer_debugfs_init(struct drm_minor *minor);
index c2b8d2a953aea7d30e65da7814e3c10331d3410c..328502aafaf7235bd8ae864cc06ba52e50c7bc8d 100644 (file)
@@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
 
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
new file mode 100644 (file)
index 0000000..9cebfe3
--- /dev/null
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel
+ *
+ * Based on drivers/base/devres.c
+ */
+
+#include <drm/drm_managed.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: managed resources
+ *
+ * Inspired by struct &device managed resources, but tied to the lifetime of
+ * struct &drm_device, which can outlive the underlying physical device, usually
+ * when userspace has some open files and other handles to resources still open.
+ *
+ * Release actions can be added with drmm_add_action(), memory allocations can
+ * be done directly with drmm_kmalloc() and the related functions. Everything
+ * will be released on the final drm_dev_put() in reverse order of how the
+ * release actions have been added and memory has been allocated since driver
+ * loading started with drm_dev_init().
+ *
+ * Note that release actions and managed memory can also be added and removed
+ * during the lifetime of the driver, all the functions are fully concurrent
+ * safe. But it is recommended to use managed resources only for resources that
+ * change rarely, if ever, during the lifetime of the &drm_device instance.
+ */
+
+struct drmres_node {
+       struct list_head        entry;
+       drmres_release_t        release;
+       const char              *name;
+       size_t                  size;
+};
+
+struct drmres {
+       struct drmres_node              node;
+       /*
+        * Some archs want to perform DMA into kmalloc caches
+        * and need a guaranteed alignment larger than
+        * the alignment of a 64-bit integer.
+        * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+        * buffer alignment as if it was allocated by plain kmalloc().
+        */
+       u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+static void free_dr(struct drmres *dr)
+{
+       kfree_const(dr->node.name);
+       kfree(dr);
+}
+
+void drm_managed_release(struct drm_device *dev)
+{
+       struct drmres *dr, *tmp;
+
+       drm_dbg_drmres(dev, "drmres release begin\n");
+       list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
+               drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
+                              dr, dr->node.name, dr->node.size);
+
+               if (dr->node.release)
+                       dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
+
+               list_del(&dr->node.entry);
+               free_dr(dr);
+       }
+       drm_dbg_drmres(dev, "drmres release end\n");
+}
+
+/*
+ * Always inline so that kmalloc_track_caller tracks the actual interesting
+ * caller outside of drm_managed.c.
+ */
+static __always_inline struct drmres * alloc_dr(drmres_release_t release,
+                                               size_t size, gfp_t gfp, int nid)
+{
+       size_t tot_size;
+       struct drmres *dr;
+
+       /* We must catch any near-SIZE_MAX cases that could overflow. */
+       if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
+               return NULL;
+
+       dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+       if (unlikely(!dr))
+               return NULL;
+
+       memset(dr, 0, offsetof(struct drmres, data));
+
+       INIT_LIST_HEAD(&dr->node.entry);
+       dr->node.release = release;
+       dr->node.size = size;
+
+       return dr;
+}
+
+static void del_dr(struct drm_device *dev, struct drmres *dr)
+{
+       list_del_init(&dr->node.entry);
+
+       drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
+                      dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+static void add_dr(struct drm_device *dev, struct drmres *dr)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->managed.lock, flags);
+       list_add(&dr->node.entry, &dev->managed.resources);
+       spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+       drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
+                      dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+/**
+ * drmm_add_final_kfree - add release action for the final kfree()
+ * @dev: DRM device
+ * @container: pointer to the kmalloc allocation containing @dev
+ *
+ * Since the allocation containing the struct &drm_device must be allocated
+ * before it can be initialized with drm_dev_init() there's no way to allocate
+ * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
+ * pointer for this final kfree() must be specified by calling this function. It
+ * will be released in the final drm_dev_put() for @dev, after all other release
+ * actions installed through drmm_add_action() have been processed.
+ */
+void drmm_add_final_kfree(struct drm_device *dev, void *container)
+{
+       WARN_ON(dev->managed.final_kfree);
+       WARN_ON(dev < (struct drm_device *) container);
+       WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
+       dev->managed.final_kfree = container;
+}
+EXPORT_SYMBOL(drmm_add_final_kfree);
+
+int __drmm_add_action(struct drm_device *dev,
+                     drmres_release_t action,
+                     void *data, const char *name)
+{
+       struct drmres *dr;
+       void **void_ptr;
+
+       dr = alloc_dr(action, data ? sizeof(void*) : 0,
+                     GFP_KERNEL | __GFP_ZERO,
+                     dev_to_node(dev->dev));
+       if (!dr) {
+               drm_dbg_drmres(dev, "failed to add action %s for %p\n",
+                              name, data);
+               return -ENOMEM;
+       }
+
+       dr->node.name = kstrdup_const(name, GFP_KERNEL);
+       if (data) {
+               void_ptr = (void **)&dr->data;
+               *void_ptr = data;
+       }
+
+       add_dr(dev, dr);
+
+       return 0;
+}
+EXPORT_SYMBOL(__drmm_add_action);
+
+int __drmm_add_action_or_reset(struct drm_device *dev,
+                              drmres_release_t action,
+                              void *data, const char *name)
+{
+       int ret;
+
+       ret = __drmm_add_action(dev, action, data, name);
+       if (ret)
+               action(dev, data);
+
+       return ret;
+}
+EXPORT_SYMBOL(__drmm_add_action_or_reset);
+
+/**
+ * drmm_kmalloc - &drm_device managed kmalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+       struct drmres *dr;
+
+       dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
+       if (!dr) {
+               drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
+                              size, gfp);
+               return NULL;
+       }
+       dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+
+       add_dr(dev, dr);
+
+       return dr->data;
+}
+EXPORT_SYMBOL(drmm_kmalloc);
+
+/**
+ * drmm_kstrdup - &drm_device managed kstrdup()
+ * @dev: DRM device
+ * @s: 0-terminated string to be duplicated
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kstrdup(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
+{
+       size_t size;
+       char *buf;
+
+       if (!s)
+               return NULL;
+
+       size = strlen(s) + 1;
+       buf = drmm_kmalloc(dev, size, gfp);
+       if (buf)
+               memcpy(buf, s, size);
+       return buf;
+}
+EXPORT_SYMBOL_GPL(drmm_kstrdup);
+
+/**
+ * drmm_kfree - &drm_device managed kfree()
+ * @dev: DRM device
+ * @data: memory allocation to be freed
+ *
+ * This is a &drm_device managed version of kfree() which can be used to
+ * release memory allocated through drmm_kmalloc() or any of its related
+ * functions before the final drm_dev_put() of @dev.
+ */
+void drmm_kfree(struct drm_device *dev, void *data)
+{
+       struct drmres *dr_match = NULL, *dr;
+       unsigned long flags;
+
+       if (!data)
+               return;
+
+       spin_lock_irqsave(&dev->managed.lock, flags);
+       list_for_each_entry(dr, &dev->managed.resources, node.entry) {
+               if (dr->data == data) {
+                       dr_match = dr;
+                       del_dr(dev, dr_match);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+       if (WARN_ON(!dr_match))
+               return;
+
+       free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_kfree);
index 558baf989f5a8432871342c399c6974620485d1a..bb27c82757f17caa3b29c58b75438b518eaf3d6e 100644 (file)
@@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
 EXPORT_SYMBOL(mipi_dbi_command_buf);
 
 /* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+                             size_t len)
 {
        u8 *buf;
        int ret;
@@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
        if (!dbidev->dbi.command)
                return -EINVAL;
 
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
+
        dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
        if (!dbidev->tx_buf)
                return -ENOMEM;
@@ -578,26 +583,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
 }
 EXPORT_SYMBOL(mipi_dbi_dev_init);
 
-/**
- * mipi_dbi_release - DRM driver release helper
- * @drm: DRM device
- *
- * This function finalizes and frees &mipi_dbi.
- *
- * Drivers can use this as their &drm_driver->release callback.
- */
-void mipi_dbi_release(struct drm_device *drm)
-{
-       struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
-
-       DRM_DEBUG_DRIVER("\n");
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(dbidev);
-}
-EXPORT_SYMBOL(mipi_dbi_release);
-
 /**
  * mipi_dbi_hw_reset - Hardware reset of controller
  * @dbi: MIPI DBI structure
@@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
  * controller or getting the read command values.
  * Drivers can use this as their &drm_driver->debugfs_init callback.
  *
- * Returns:
- * Zero on success, negative error code on failure.
  */
-int mipi_dbi_debugfs_init(struct drm_minor *minor)
+void mipi_dbi_debugfs_init(struct drm_minor *minor)
 {
        struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
        umode_t mode = S_IFREG | S_IWUSR;
@@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
                mode |= S_IRUGO;
        debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
                            &mipi_dbi_debugfs_command_fops);
-
-       return 0;
 }
 EXPORT_SYMBOL(mipi_dbi_debugfs_init);
 
index 8981abe8b7c940e82fe33fa1085994035dd0daf4..f4ca1ff80af9f23ff2ada4dd545917014aa32e74 100644 (file)
@@ -212,20 +212,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                   &drm_mm_interval_tree_augment);
 }
 
-#define RB_INSERT(root, member, expr) do { \
-       struct rb_node **link = &root.rb_node, *rb = NULL; \
-       u64 x = expr(node); \
-       while (*link) { \
-               rb = *link; \
-               if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
-                       link = &rb->rb_left; \
-               else \
-                       link = &rb->rb_right; \
-       } \
-       rb_link_node(&node->member, rb, link); \
-       rb_insert_color(&node->member, &root); \
-} while (0)
-
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
@@ -255,16 +241,42 @@ static void insert_hole_size(struct rb_root_cached *root,
        rb_insert_color_cached(&node->rb_hole_size, root, first);
 }
 
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+                        struct drm_mm_node, rb_hole_addr,
+                        u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+       struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+       u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+       struct drm_mm_node *parent;
+
+       while (*link) {
+               rb_parent = *link;
+               parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+               if (parent->subtree_max_hole < subtree_max_hole)
+                       parent->subtree_max_hole = subtree_max_hole;
+               if (start < HOLE_ADDR(parent))
+                       link = &parent->rb_hole_addr.rb_left;
+               else
+                       link = &parent->rb_hole_addr.rb_right;
+       }
+
+       rb_link_node(&node->rb_hole_addr, rb_parent, link);
+       rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
 static void add_hole(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
 
        node->hole_size =
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+       node->subtree_max_hole = node->hole_size;
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
        insert_hole_size(&mm->holes_size, node);
-       RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+       insert_hole_addr(&mm->holes_addr, node);
 
        list_add(&node->hole_stack, &mm->hole_stack);
 }
@@ -275,8 +287,10 @@ static void rm_hole(struct drm_mm_node *node)
 
        list_del(&node->hole_stack);
        rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
-       rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+       rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+                          &augment_callbacks);
        node->hole_size = 0;
+       node->subtree_max_hole = 0;
 
        DRM_MM_BUG_ON(drm_mm_hole_follows(node));
 }
@@ -361,9 +375,90 @@ first_hole(struct drm_mm *mm,
        }
 }
 
+/**
+ * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether left subtree of @entry has hole big enough
+ * to fit the requested size. If so, it will return previous node of @entry or
+ * else it will return parent node of @entry
+ *
+ * It will also skip the complete left subtree if subtree_max_hole of that
+ * subtree is same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * previous node of @entry if left subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_high_addr(struct drm_mm_node *entry, u64 size)
+{
+       struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
+       struct drm_mm_node *left_node;
+
+       if (!entry)
+               return NULL;
+
+       rb_node = &entry->rb_hole_addr;
+       if (rb_node->rb_left) {
+               left_rb_node = rb_node->rb_left;
+               parent_rb_node = rb_parent(rb_node);
+               left_node = rb_entry(left_rb_node,
+                                    struct drm_mm_node, rb_hole_addr);
+               if ((left_node->subtree_max_hole < size ||
+                    entry->size == entry->subtree_max_hole) &&
+                   parent_rb_node && parent_rb_node->rb_left != rb_node)
+                       return rb_hole_addr_to_node(parent_rb_node);
+       }
+
+       return rb_hole_addr_to_node(rb_prev(rb_node));
+}
+
+/**
+ * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether right subtree of @entry has hole big enough
+ * to fit the requested size. If so, it will return next node of @entry or
+ * else it will return parent node of @entry
+ *
+ * It will also skip the complete right subtree if subtree_max_hole of that
+ * subtree is same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * next node of @entry if right subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_low_addr(struct drm_mm_node *entry, u64 size)
+{
+       struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
+       struct drm_mm_node *right_node;
+
+       if (!entry)
+               return NULL;
+
+       rb_node = &entry->rb_hole_addr;
+       if (rb_node->rb_right) {
+               right_rb_node = rb_node->rb_right;
+               parent_rb_node = rb_parent(rb_node);
+               right_node = rb_entry(right_rb_node,
+                                     struct drm_mm_node, rb_hole_addr);
+               if ((right_node->subtree_max_hole < size ||
+                    entry->size == entry->subtree_max_hole) &&
+                   parent_rb_node && parent_rb_node->rb_right != rb_node)
+                       return rb_hole_addr_to_node(parent_rb_node);
+       }
+
+       return rb_hole_addr_to_node(rb_next(rb_node));
+}
+
 static struct drm_mm_node *
 next_hole(struct drm_mm *mm,
          struct drm_mm_node *node,
+         u64 size,
          enum drm_mm_insert_mode mode)
 {
        switch (mode) {
@@ -372,10 +467,10 @@ next_hole(struct drm_mm *mm,
                return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
        case DRM_MM_INSERT_LOW:
-               return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+               return next_hole_low_addr(node, size);
 
        case DRM_MM_INSERT_HIGH:
-               return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+               return next_hole_high_addr(node, size);
 
        case DRM_MM_INSERT_EVICT:
                node = list_next_entry(node, hole_stack);
@@ -489,7 +584,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        for (hole = first_hole(mm, range_start, range_end, size, mode);
             hole;
-            hole = once ? NULL : next_hole(mm, hole, mode)) {
+            hole = once ? NULL : next_hole(mm, hole, size, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
index 08e6eff6a1797b2e0686af7610f061b587ca419d..5761f838a057611a7865e28f8358714c01e87562 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_file.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drm_print.h>
 #include <linux/dma-resv.h>
@@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
        return 0;
 }
 
+static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+{
+       drm_mode_config_cleanup(dev);
+}
+
 /**
- * drm_mode_config_init - initialize DRM mode_configuration structure
+ * drmm_mode_config_init - managed DRM mode_configuration structure
+ *     initialization
  * @dev: DRM device
  *
  * Initialize @dev's mode_config structure, used for tracking the graphics
@@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
  * problem, since this should happen single threaded at init time. It is the
  * driver's problem to ensure this guarantee.
  *
+ * Cleanup is automatically handled through registering drm_mode_config_cleanup
+ * with drmm_add_action().
+ *
+ * Returns: 0 on success, negative error value on failure.
  */
-void drm_mode_config_init(struct drm_device *dev)
+int drmm_mode_config_init(struct drm_device *dev)
 {
        mutex_init(&dev->mode_config.mutex);
        drm_modeset_lock_init(&dev->mode_config.connection_mutex);
@@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev)
                drm_modeset_acquire_fini(&modeset_ctx);
                dma_resv_fini(&resv);
        }
+
+       return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
+                                       NULL);
 }
-EXPORT_SYMBOL(drm_mode_config_init);
+EXPORT_SYMBOL(drmm_mode_config_init);
 
 /**
  * drm_mode_config_cleanup - free up DRM mode_config info
@@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init);
  * Note that since this /should/ happen single-threaded at driver/device
  * teardown time, no locking is required. It's the driver's job to ensure that
  * this guarantee actually holds true.
+ *
+ * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
+ * drivers to explicitly call this function.
  */
 void drm_mode_config_cleanup(struct drm_device *dev)
 {
@@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev)
        drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+static u32 full_encoder_mask(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+
+       drm_for_each_encoder(encoder, dev)
+               encoder_mask |= drm_encoder_mask(encoder);
+
+       return encoder_mask;
+}
+
+/*
+ * For some reason we want the encoder itself included in
+ * possible_clones. Make life easy for drivers by allowing them
+ * to leave possible_clones unset if no cloning is possible.
+ */
+static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
+{
+       if (encoder->possible_clones == 0)
+               encoder->possible_clones = drm_encoder_mask(encoder);
+}
+
+static void validate_encoder_possible_clones(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       u32 encoder_mask = full_encoder_mask(dev);
+       struct drm_encoder *other;
+
+       drm_for_each_encoder(other, dev) {
+               WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
+                    !!(other->possible_clones & drm_encoder_mask(encoder)),
+                    "possible_clones mismatch: "
+                    "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
+                    "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
+                    encoder->base.id, encoder->name,
+                    drm_encoder_mask(encoder), encoder->possible_clones,
+                    other->base.id, other->name,
+                    drm_encoder_mask(other), other->possible_clones);
+       }
+
+       WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
+            (encoder->possible_clones & ~encoder_mask) != 0,
+            "Bogus possible_clones: "
+            "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
+            encoder->base.id, encoder->name,
+            encoder->possible_clones, encoder_mask);
+}
+
+static u32 full_crtc_mask(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       u32 crtc_mask = 0;
+
+       drm_for_each_crtc(crtc, dev)
+               crtc_mask |= drm_crtc_mask(crtc);
+
+       return crtc_mask;
+}
+
+static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
+{
+       u32 crtc_mask = full_crtc_mask(encoder->dev);
+
+       WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
+            (encoder->possible_crtcs & ~crtc_mask) != 0,
+            "Bogus possible_crtcs: "
+            "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
+            encoder->base.id, encoder->name,
+            encoder->possible_crtcs, crtc_mask);
+}
+
+void drm_mode_config_validate(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       drm_for_each_encoder(encoder, dev)
+               fixup_encoder_possible_clones(encoder);
+
+       drm_for_each_encoder(encoder, dev) {
+               validate_encoder_possible_clones(encoder);
+               validate_encoder_possible_crtcs(encoder);
+       }
+}
index 35c2719407a828d00f688fa7e2f336f38e3dabcf..901b078abf40c5d7bf1819e6f6cd4ac256c4f774 100644 (file)
@@ -402,12 +402,13 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_mode_obj_get_properties *arg = data;
        struct drm_mode_object *obj;
+       struct drm_modeset_acquire_ctx ctx;
        int ret = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EOPNOTSUPP;
 
-       drm_modeset_lock_all(dev);
+       DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
        obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
        if (!obj) {
@@ -427,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 out_unref:
        drm_mode_object_put(obj);
 out:
-       drm_modeset_unlock_all(dev);
+       DRM_MODESET_LOCK_ALL_END(ctx, ret);
        return ret;
 }
 
@@ -449,12 +450,13 @@ static int set_property_legacy(struct drm_mode_object *obj,
 {
        struct drm_device *dev = prop->dev;
        struct drm_mode_object *ref;
+       struct drm_modeset_acquire_ctx ctx;
        int ret = -EINVAL;
 
        if (!drm_property_change_valid_get(prop, prop_value, &ref))
                return -EINVAL;
 
-       drm_modeset_lock_all(dev);
+       DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
        switch (obj->type) {
        case DRM_MODE_OBJECT_CONNECTOR:
                ret = drm_connector_set_obj_prop(obj, prop, prop_value);
@@ -468,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
                break;
        }
        drm_property_change_valid_put(prop, ref);
-       drm_modeset_unlock_all(dev);
+       DRM_MODESET_LOCK_ALL_END(ctx, ret);
 
        return ret;
 }
index d4d64518e11b8fc06f45eddf2673a8c6ba0bfe34..fec1c33b30456ab33cb2fa8f45b61f050b6c8fcc 100644 (file)
@@ -747,32 +747,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_set_name);
 
-/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * Returns:
- * @modes's hsync rate in kHz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
- */
-int drm_mode_hsync(const struct drm_display_mode *mode)
-{
-       unsigned int calc_val;
-
-       if (mode->hsync)
-               return mode->hsync;
-
-       if (mode->htotal <= 0)
-               return 0;
-
-       calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
-       calc_val += 500;                                /* round to 1000Hz */
-       calc_val /= 1000;                               /* truncate to kHz */
-
-       return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
index 81aa215619821ab94b86c5207130248ebc5f94a0..75e2b7053f353b818f770c1a728413e02fe2f576 100644 (file)
 #include <drm/drm.h>
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 
 #include "drm_internal.h"
 #include "drm_legacy.h"
 
+#ifdef CONFIG_DRM_LEGACY
+
 /**
  * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
  * @dev: DRM device
@@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 }
 
 EXPORT_SYMBOL(drm_pci_free);
+#endif
 
 static int drm_get_pci_domain(struct drm_device *dev)
 {
index d6ad60ab0d389a3f83d073ffaca2908d3af48c24..4af173ced32772e1b23ca173ec983cf3fbeb1ae6 100644 (file)
@@ -289,6 +289,8 @@ EXPORT_SYMBOL(drm_universal_plane_init);
 
 int drm_plane_register_all(struct drm_device *dev)
 {
+       unsigned int num_planes = 0;
+       unsigned int num_zpos = 0;
        struct drm_plane *plane;
        int ret = 0;
 
@@ -297,8 +299,15 @@ int drm_plane_register_all(struct drm_device *dev)
                        ret = plane->funcs->late_register(plane);
                if (ret)
                        return ret;
+
+               if (plane->zpos_property)
+                       num_zpos++;
+               num_planes++;
        }
 
+       drm_WARN(dev, num_zpos && num_planes != num_zpos,
+                "Mixing planes with and without zpos property is invalid\n");
+
        return 0;
 }
 
index da7b0b0c1090de60bc4baa2f5b4b03e0466cdf0c..2d5ce690d214b4835c395603ec3267e4e019eddb 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
 /**
  * DOC: vblank handling
  *
+ * From the computer's perspective, every time the monitor displays
+ * a new frame the scanout engine has "scanned out" the display image
+ * from top to bottom, one row of pixels at a time. The current row
+ * of pixels is referred to as the current scanline.
+ *
+ * In addition to the display's visible area, there's usually a couple of
+ * extra scanlines which aren't actually displayed on the screen.
+ * These extra scanlines don't contain image data and are occasionally used
+ * for features like audio and infoframes. The region made up of these
+ * scanlines is referred to as the vertical blanking region, or vblank for
+ * short.
+ *
+ * For historical reference, the vertical blanking period was designed to
+ * give the electron gun (on CRTs) enough time to move back to the top of
+ * the screen to start scanning out the next frame. Similar for horizontal
+ * blanking periods. They were designed to give the electron gun enough
+ * time to move back to the other side of the screen to start scanning the
+ * next scanline.
+ *
+ * ::
+ *
+ *
+ *    physical →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *    top of      |                                        |
+ *    display     |                                        |
+ *                |               New frame                |
+ *                |                                        |
+ *                |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
+ *                |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
+ *                |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|   updates the
+ *                |                                        |   frame as it
+ *                |                                        |   travels down
+ *                |                                        |   ("scan out")
+ *                |               Old frame                |
+ *                |                                        |
+ *                |                                        |
+ *                |                                        |
+ *                |                                        |   physical
+ *                |                                        |   bottom of
+ *    vertical    |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
+ *    blanking    ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *    region   →  ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *                ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *    start of →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *    new frame
+ *
+ * "Physical top of display" is the reference point for the high-precision/
+ * corrected timestamp.
+ *
+ * On a lot of display hardware, programming needs to take effect during the
+ * vertical blanking period so that settings like gamma, the image buffer
+ * to be scanned out, etc. can safely be changed without showing
+ * any visual artifacts on the screen. In some unforgiving hardware, some of
+ * this programming has to both start and end in the same vblank. To help
+ * with the timing of the hardware programming, an interrupt is usually
+ * available to notify the driver when it can start the updating of registers.
+ * The interrupt is in this context named the vblank interrupt.
+ *
+ * The vblank interrupt may be fired at different points depending on the
+ * hardware. Some hardware implementations will fire the interrupt when the
+ * new frame starts, other implementations will fire the interrupt at different
+ * points in time.
+ *
  * Vertical blanking plays a major role in graphics rendering. To achieve
  * tear-free display, users must synchronize page flips and/or rendering to
  * vertical blanking. The DRM API offers ioctls to perform page flips
@@ -278,8 +342,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 
        DRM_DEBUG_VBL("updating vblank count on crtc %u:"
                      " current=%llu, diff=%u, hw=%u hw_last=%u\n",
-                     pipe, atomic64_read(&vblank->count), diff,
-                     cur_vblank, vblank->last);
+                     pipe, (unsigned long long)atomic64_read(&vblank->count),
+                     diff, cur_vblank, vblank->last);
 
        if (diff == 0) {
                WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -425,14 +489,10 @@ static void vblank_disable_fn(struct timer_list *t)
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 
-void drm_vblank_cleanup(struct drm_device *dev)
+static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
 {
        unsigned int pipe;
 
-       /* Bail if the driver didn't call drm_vblank_init() */
-       if (dev->num_crtcs == 0)
-               return;
-
        for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
                struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
@@ -441,10 +501,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
                del_timer_sync(&vblank->disable_timer);
        }
-
-       kfree(dev->vblank);
-
-       dev->num_crtcs = 0;
 }
 
 /**
@@ -453,25 +509,29 @@ void drm_vblank_cleanup(struct drm_device *dev)
  * @num_crtcs: number of CRTCs supported by @dev
  *
  * This function initializes vblank support for @num_crtcs display pipelines.
- * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
- * drivers with a &drm_driver.release callback.
+ * Cleanup is handled automatically through a cleanup function added with
+ * drmm_add_action().
  *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
 int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 {
-       int ret = -ENOMEM;
+       int ret;
        unsigned int i;
 
        spin_lock_init(&dev->vbl_lock);
        spin_lock_init(&dev->vblank_time_lock);
 
+       dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+       if (!dev->vblank)
+               return -ENOMEM;
+
        dev->num_crtcs = num_crtcs;
 
-       dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
-       if (!dev->vblank)
-               goto err;
+       ret = drmm_add_action(dev, drm_vblank_init_release, NULL);
+       if (ret)
+               return ret;
 
        for (i = 0; i < num_crtcs; i++) {
                struct drm_vblank_crtc *vblank = &dev->vblank[i];
@@ -486,10 +546,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
        DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
        return 0;
-
-err:
-       dev->num_crtcs = 0;
-       return ret;
 }
 EXPORT_SYMBOL(drm_vblank_init);
 
index aa88911bbc06d48aea6c664a25f3bbb47deb1115..56197ae0b2f96a9812890489bf82a1a01148571a 100644 (file)
@@ -595,8 +595,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
+               fallthrough;    /* to _DRM_FRAME_BUFFER... */
 #endif
-               /* fall through - to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
@@ -621,7 +621,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
-               /* fall through - to _DRM_SHM */
+               fallthrough;    /* to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
deleted file mode 100644 (file)
index 2000d9b..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
- * buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM is managed
- * by &struct drm_vram_mm (VRAM MM).
- *
- * With the GEM interface userspace applications create, manage and destroy
- * graphics buffers, such as an on-screen framebuffer. GEM does not provide
- * an implementation of these interfaces. It's up to the DRM driver to
- * provide an implementation that suits the hardware. If the hardware device
- * contains dedicated video memory, the DRM driver can use the VRAM helper
- * library. Each active buffer object is stored in video RAM. Active
- * buffer are used for drawing the current frame, typically something like
- * the frame's scanout buffer or the cursor image. If there's no more space
- * left in VRAM, inactive GEM objects can be moved to system memory.
- *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
- *
- * .. code-block:: c
- *
- *     struct file_operations fops ={
- *             .owner = THIS_MODULE,
- *             DRM_VRAM_MM_FILE_OPERATION
- *     };
- *     struct drm_driver drv = {
- *             .driver_feature = DRM_ ... ,
- *             .fops = &fops,
- *             DRM_GEM_VRAM_DRIVER
- *     };
- *
- *     int init_drm_driver()
- *     {
- *             struct drm_device *dev;
- *             uint64_t vram_base;
- *             unsigned long vram_size;
- *             int ret;
- *
- *             // setup device, vram base and size
- *             // ...
- *
- *             ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
- *             if (ret)
- *                     return ret;
- *             return 0;
- *     }
- *
- * This creates an instance of &struct drm_vram_mm, exports DRM userspace
- * interfaces for GEM buffer management and initializes file operations to
- * allow for accessing created GEM buffers. With this setup, the DRM driver
- * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
- * to userspace.
- *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
- *
- * .. code-block:: c
- *
- *     void fini_drm_driver()
- *     {
- *             struct drm_device *dev = ...;
- *
- *             drm_vram_helper_release_mm(dev);
- *     }
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
- * A buffer object that is pinned in video RAM has a fixed address within that
- * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
- * it's used to program the hardware's scanout engine for framebuffers, set
- * the cursor overlay's image for a mouse cursor, or use it as input to the
- * hardware's draing engine.
- *
- * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
- * release the mapping.
- */
-
-MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
-MODULE_LICENSE("GPL");
index 7585d8f68fb94ee7dfd55528132c48c2184de0e8..f9afe11c50f0c7c26eb44b0ab86791b6c25cdd0c 100644 (file)
@@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = {
                {"ring", show_each_gpu, 0, etnaviv_ring_show},
 };
 
-static int etnaviv_debugfs_init(struct drm_minor *minor)
+static void etnaviv_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(etnaviv_debugfs_list,
-                       ARRAY_SIZE(etnaviv_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
+       drm_debugfs_create_files(etnaviv_debugfs_list,
+                                ARRAY_SIZE(etnaviv_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index 5ee090691390f3f967c35d63ac41f85e96b0da8b..9ac51b6ab34b1dfa302efdc7c94af344fb49f759 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_crtc.h"
@@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
        .disable = exynos_dp_nop,
 };
 
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
 {
        int ret;
@@ -167,8 +164,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
                        return ret;
        }
 
-       drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
 
index 43fa0f26c052e97bc87248660d827e48c0ac14d8..7ba5354e7d9446f97d094eb8112cb33d7f1fe197 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <video/of_videomode.h>
 #include <video/videomode.h>
@@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
        .disable = exynos_dpi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 enum {
        FIMD_PORT_IN0,
        FIMD_PORT_IN1,
@@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
 {
        int ret;
 
-       drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
 
index 57defeb445223b533df7e38b144182b733a78c11..dbd80f1e4c78bd44118acdc20449f7a4f82bd624 100644 (file)
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
-       .fault = exynos_drm_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
 };
index e080aa92338c060252b985484067405e23e2c7d6..ee96a95fb6be50824d3483530d5e4ae14135d1df 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 
 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
 
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
        "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
        "sclk_rgb_vclk_to_dsim0" };
 
@@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
        .disable = exynos_dsi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
 
 static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
@@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
        struct drm_bridge *in_bridge;
        int ret;
 
-       drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
 
@@ -1763,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        dsi->dev = dev;
        dsi->driver_data = of_device_get_match_data(dev);
 
-       ret = exynos_dsi_parse_dt(dsi);
-       if (ret)
-               return ret;
-
        dsi->supplies[0].supply = "vddcore";
        dsi->supplies[1].supply = "vddio";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1813,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        }
 
        dsi->irq = platform_get_irq(pdev, 0);
-       if (dsi->irq < 0) {
-               dev_err(dev, "failed to request dsi irq resource\n");
+       if (dsi->irq < 0)
                return dsi->irq;
-       }
 
        irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
        ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1827,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = exynos_dsi_parse_dt(dsi);
+       if (ret)
+               return ret;
+
        platform_set_drvdata(pdev, &dsi->encoder);
 
        pm_runtime_enable(dev);
 
-       return component_add(dev, &exynos_dsi_component_ops);
+       ret = component_add(dev, &exynos_dsi_component_ops);
+       if (ret)
+               goto err_disable_runtime;
+
+       return 0;
+
+err_disable_runtime:
+       pm_runtime_disable(dev);
+       of_node_put(dsi->in_bridge_node);
+
+       return ret;
 }
 
 static int exynos_dsi_remove(struct platform_device *pdev)
index e6ceaf36fb044ab2081b871b83f5b7528cc348a2..56a2b47e1af79fd254e992abdd4d256fbd0dbe3e 100644 (file)
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        struct fb_info *fbi;
        struct drm_framebuffer *fb = helper->fb;
        unsigned int size = fb->width * fb->height * fb->format->cpp[0];
-       unsigned int nr_pages;
        unsigned long offset;
 
        fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
        drm_fb_helper_fill_info(fbi, helper, sizes);
 
-       nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-       exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-       if (!exynos_gem->kvaddr) {
-               DRM_DEV_ERROR(to_dma_dev(helper->dev),
-                             "failed to map pages to kernel space.\n");
-               return -EIO;
-       }
-
        offset = fbi->var.xoffset * fb->format->cpp[0];
        offset += fbi->var.yoffset * fb->pitches[0];
 
@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        size = mode_cmd.pitches[0] * mode_cmd.height;
 
-       exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
-       /*
-        * If physically contiguous memory allocation fails and if IOMMU is
-        * supported then try to get buffer from non physically contiguous
-        * memory area.
-        */
-       if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
-               dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-               exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-                                                  size);
-       }
-
+       exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);
 
@@ -229,12 +207,8 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
-       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-       struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
        struct drm_framebuffer *fb;
 
-       vunmap(exynos_gem->kvaddr);
-
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
                fb = fb_helper->fb;
index d734d9d51762f8923685c76ab67ad135637eb4b6..0df57ee341441cb2a57dbe8007d2d3573d941c33 100644 (file)
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 {
        struct drm_device *dev = exynos_gem->base.dev;
-       unsigned long attr;
-       unsigned int nr_pages;
-       struct sg_table sgt;
-       int ret = -ENOMEM;
+       unsigned long attr = 0;
 
        if (exynos_gem->dma_addr) {
                DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
                return 0;
        }
 
-       exynos_gem->dma_attrs = 0;
-
        /*
         * if EXYNOS_BO_CONTIG, fully physically contiguous memory
         * region will be allocated else physically contiguous
         * as possible.
         */
        if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-               exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+               attr |= DMA_ATTR_FORCE_CONTIGUOUS;
 
        /*
         * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
         */
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-               attr = DMA_ATTR_WRITE_COMBINE;
+               attr |= DMA_ATTR_WRITE_COMBINE;
        else
-               attr = DMA_ATTR_NON_CONSISTENT;
-
-       exynos_gem->dma_attrs |= attr;
-       exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+               attr |= DMA_ATTR_NON_CONSISTENT;
 
-       nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-       exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-                       GFP_KERNEL | __GFP_ZERO);
-       if (!exynos_gem->pages) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
-               return -ENOMEM;
-       }
+       /* FBDev emulation requires kernel mapping */
+       if (!kvmap)
+               attr |= DMA_ATTR_NO_KERNEL_MAPPING;
 
+       exynos_gem->dma_attrs = attr;
        exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
-               goto err_free;
-       }
-
-       ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
-                                   exynos_gem->dma_addr, exynos_gem->size,
-                                   exynos_gem->dma_attrs);
-       if (ret < 0) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
-               goto err_dma_free;
-       }
-
-       if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
-                                            nr_pages)) {
-               DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
-               ret = -EINVAL;
-               goto err_sgt_free;
+               return -ENOMEM;
        }
 
-       sg_free_table(&sgt);
+       if (kvmap)
+               exynos_gem->kvaddr = exynos_gem->cookie;
 
        DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
        return 0;
-
-err_sgt_free:
-       sg_free_table(&sgt);
-err_dma_free:
-       dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
-                      exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
-       kvfree(exynos_gem->pages);
-
-       return ret;
 }
 
 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        exynos_gem->dma_attrs);
-
-       kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
 
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
-                                            unsigned long size)
+                                            unsigned long size,
+                                            bool kvmap)
 {
        struct exynos_drm_gem *exynos_gem;
        int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;
 
-       ret = exynos_drm_alloc_buf(exynos_gem);
+       ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem->base);
                kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
        struct exynos_drm_gem *exynos_gem;
        int ret;
 
-       exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+       exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
        if (IS_ERR(exynos_gem))
                return PTR_ERR(exynos_gem);
 
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        else
                flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
-       exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+       exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
        if (IS_ERR(exynos_gem)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       struct drm_gem_object *obj = vma->vm_private_data;
-       struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-       unsigned long pfn;
-       pgoff_t page_offset;
-
-       page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
-       if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
-               DRM_ERROR("invalid page offset\n");
-               return VM_FAULT_SIGBUS;
-       }
-
-       pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-       return vmf_insert_mixed(vma, vmf->address,
-                       __pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
                                   struct vm_area_struct *vma)
 {
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-       int npages;
+       struct drm_device *drm_dev = obj->dev;
+       struct sg_table *sgt;
+       int ret;
 
-       npages = exynos_gem->size >> PAGE_SHIFT;
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
 
-       return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+       ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+                                   exynos_gem->dma_addr, exynos_gem->size,
+                                   exynos_gem->dma_attrs);
+       if (ret) {
+               DRM_ERROR("failed to get sgtable, %d\n", ret);
+               kfree(sgt);
+               return ERR_PTR(ret);
+       }
+
+       return sgt;
 }
 
 struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct sg_table *sgt)
 {
        struct exynos_drm_gem *exynos_gem;
-       int npages;
-       int ret;
-
-       exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
-       if (IS_ERR(exynos_gem)) {
-               ret = PTR_ERR(exynos_gem);
-               return ERR_PTR(ret);
-       }
 
-       exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+       if (sgt->nents < 1)
+               return ERR_PTR(-EINVAL);
 
-       npages = exynos_gem->size >> PAGE_SHIFT;
-       exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-       if (!exynos_gem->pages) {
-               ret = -ENOMEM;
-               goto err;
+       /*
+        * Check if the provided buffer has been mapped as contiguous
+        * into DMA address space.
+        */
+       if (sgt->nents > 1) {
+               dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+               struct scatterlist *s;
+               unsigned int i;
+
+               for_each_sg(sgt->sgl, s, sgt->nents, i) {
+                       if (!sg_dma_len(s))
+                               break;
+                       if (sg_dma_address(s) != next_addr) {
+                               DRM_ERROR("buffer chunks must be mapped contiguously");
+                               return ERR_PTR(-EINVAL);
+                       }
+                       next_addr = sg_dma_address(s) + sg_dma_len(s);
+               }
        }
 
-       ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
-                                              npages);
-       if (ret < 0)
-               goto err_free_large;
-
-       exynos_gem->sgt = sgt;
+       exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+       if (IS_ERR(exynos_gem))
+               return ERR_CAST(exynos_gem);
 
-       if (sgt->nents == 1) {
-               /* always physically continuous memory if sgt->nents is 1. */
-               exynos_gem->flags |= EXYNOS_BO_CONTIG;
-       } else {
-               /*
-                * this case could be CONTIG or NONCONTIG type but for now
-                * sets NONCONTIG.
-                * TODO. we have to find a way that exporter can notify
-                * the type of its own buffer to importer.
-                */
+       /*
+        * Buffer has been mapped as contiguous into DMA address space,
+        * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+        * We assume a simplified logic below:
+        */
+       if (is_drm_iommu_supported(dev))
                exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
-       }
+       else
+               exynos_gem->flags |= EXYNOS_BO_CONTIG;
 
+       exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+       exynos_gem->sgt = sgt;
        return &exynos_gem->base;
-
-err_free_large:
-       kvfree(exynos_gem->pages);
-err:
-       drm_gem_object_release(&exynos_gem->base);
-       kfree(exynos_gem);
-       return ERR_PTR(ret);
 }
 
 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
index 42ec67bc262d4436ba40aefedcccd92ef1d09457..6ef001f890aa127dad03ea437fbfc4c3889cb77d 100644 (file)
  * @base: a gem object.
  *     - a new handle to this gem object would be created
  *     by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- *     - contain the information to memory region allocated
- *     by user request or at framebuffer creation.
- *     continuous memory region allocated by user request
- *     or at framebuffer creation.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  * @size: size requested from user, in bytes and this size is aligned
  *     in page unit.
  * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
- * @pages: Array of backing pages.
+ * @dma_attrs: attrs passed dma mapping framework
  * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
        void __iomem            *kvaddr;
        dma_addr_t              dma_addr;
        unsigned long           dma_attrs;
-       struct page             **pages;
        struct sg_table         *sgt;
 };
 
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
 /* create a new buffer with gem object */
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
                                             unsigned int flags,
-                                            unsigned long size);
+                                            unsigned long size,
+                                            bool kvmap);
 
 /*
  * request gem object creation and buffer allocation as the size
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args);
 
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
index f41d75923557a23a0e1fcfc103aac0c76abb7277..a86abc173605e5d3840da55b0cafb6cedf903287 100644 (file)
@@ -88,7 +88,7 @@
 
 #define MIC_BS_SIZE_2D(x)      ((x) & 0x3fff)
 
-static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
+static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
 #define NUM_CLKS               ARRAY_SIZE(clk_names)
 static DEFINE_MUTEX(mic_mutex);
 
index dafa87b82052967ca27292c261008125a8e4d58d..2d94afba031e429e351ffe1d2e7d3cf1f99295b2 100644 (file)
@@ -293,10 +293,8 @@ static int rotator_probe(struct platform_device *pdev)
                return PTR_ERR(rot->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "failed to get irq\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
                               rot);
index 93c43c8d914ee78a1b275b700c6486026958ce84..ce1857138f89301cdc1a134744aea46c91622abd 100644 (file)
@@ -502,10 +502,8 @@ static int scaler_probe(struct platform_device *pdev)
                return PTR_ERR(scaler->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "failed to get irq\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
                                        IRQF_ONESHOT, "drm_scaler", scaler);
index b320b3a21ad4f60e4efd373217861829cbdc363c..e5662bdcbbde3dd1babb055fa879ab50d82ef6d0 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 #include <drm/exynos_drm.h>
 
@@ -213,6 +214,12 @@ static ssize_t vidi_store_connection(struct device *dev,
 static DEVICE_ATTR(connection, 0644, vidi_show_connection,
                        vidi_store_connection);
 
+static struct attribute *vidi_attrs[] = {
+       &dev_attr_connection.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(vidi);
+
 int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
                                struct drm_file *file_priv)
 {
@@ -369,10 +376,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs =
        .disable = exynos_vidi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
        struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -406,8 +409,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(ctx->crtc);
        }
 
-       drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
 
@@ -443,7 +445,6 @@ static int vidi_probe(struct platform_device *pdev)
 {
        struct vidi_context *ctx;
        struct device *dev = &pdev->dev;
-       int ret;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -457,23 +458,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
-       ret = device_create_file(dev, &dev_attr_connection);
-       if (ret < 0) {
-               DRM_DEV_ERROR(dev,
-                             "failed to create connection sysfs.\n");
-               return ret;
-       }
-
-       ret = component_add(dev, &vidi_component_ops);
-       if (ret)
-               goto err_remove_file;
-
-       return ret;
-
-err_remove_file:
-       device_remove_file(dev, &dev_attr_connection);
-
-       return ret;
+       return component_add(dev, &vidi_component_ops);
 }
 
 static int vidi_remove(struct platform_device *pdev)
@@ -498,5 +483,6 @@ struct platform_driver vidi_driver = {
        .driver         = {
                .name   = "exynos-drm-vidi",
                .owner  = THIS_MODULE,
+               .dev_groups = vidi_groups,
        },
 };
index 1a7c828fc41d8d723fe444a5b9bd35de3361b513..95dd399aa9ccb3da044acfc4a3a3e6d27198d6ae 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "regs-hdmi.h"
@@ -1559,10 +1560,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs =
        .disable        = hdmi_disable,
 };
 
-static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1843,8 +1840,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 
        hdata->phy_clk.enable = hdmiphy_clk_enable;
 
-       drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
 
index 21b726baedeaa625a975d45a37fe3ef834baa154..c7e2e2ebc327bcf9beef2069c15fb52983655800 100644 (file)
@@ -1244,9 +1244,11 @@ static int mixer_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
+       pm_runtime_enable(dev);
+
        ret = component_add(&pdev->dev, &mixer_component_ops);
-       if (!ret)
-               pm_runtime_enable(dev);
+       if (ret)
+               pm_runtime_disable(dev);
 
        return ret;
 }
index cff344367f81f49758394807627f5f68afd5857a..9b0c4736c21ae565a59a0e8cf98cf3e7d759cee6 100644 (file)
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = fsl_dcu_drm_encoder_destroy,
-};
-
 int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
                               struct drm_crtc *crtc)
 {
@@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
        if (fsl_dev->tcon)
                fsl_tcon_bypass_enable(fsl_dev->tcon);
 
-       ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                return ret;
 
index 29c36d63b20e1d1d81ca1a9b599d75812e55aecc..88535f5aacc5d39aa0bc484a5cad41c5ae325701 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "cdv_device.h"
 #include "intel_bios.h"
 #include "power.h"
@@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs
        .best_encoder = gma_best_encoder,
 };
 
-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
-       .destroy = cdv_intel_crt_enc_destroy,
-};
-
 void cdv_intel_crt_init(struct drm_device *dev,
                        struct psb_intel_mode_device *mode_dev)
 {
@@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
                &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
        encoder = &gma_encoder->base;
-       drm_encoder_init(dev, encoder,
-               &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
index 5772b2dce0d662170731de50b2400ed52feb4c6d..f41cbb753bb469ccc7dc10f4a4736f371f293004 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "gma_display.h"
 #include "psb_drv.h"
@@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ
        return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
 
-
-#if 0
-static char    *voltage_names[] = {
-       "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char    *pre_emph_names[] = {
-       "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char    *link_train_names[] = {
-       "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
 #define CDV_DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
-/*
-static uint8_t
-cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
-{
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
-       }
-}
-*/
+
 static void
 cdv_intel_get_adjust_train(struct gma_encoder *encoder)
 {
@@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
 static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
        .dpms = cdv_intel_dp_dpms,
        .mode_fixup = cdv_intel_dp_mode_fixup,
@@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun
        .best_encoder = gma_best_encoder,
 };
 
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
-       .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
 static void cdv_intel_dp_add_properties(struct drm_connector *connector)
 {
        cdv_intel_attach_force_audio_property(connector);
@@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
        encoder = &gma_encoder->base;
 
        drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
-       drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
@@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
                if (ret == 0) {
                        /* if this fails, presume the device is a ghost */
                        DRM_INFO("failed to retrieve link info, disabling eDP\n");
-                       cdv_intel_dp_encoder_destroy(encoder);
+                       drm_encoder_cleanup(encoder);
                        cdv_intel_dp_destroy(connector);
                        goto err_priv;
                } else {
index 1711a41acc165414a5533048c745f18b2b274ea8..0d12c6ffbc4078796a9e1fcc56e3a583dba54786 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "cdv_device.h"
 #include "psb_drv.h"
@@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev,
                           &cdv_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_DVID);
 
-       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_HDMI;
index ea0a5d9a0acc3111515cc5069d248f4c9509dba9..eaaf4efec21765a494c3bc2ef6750bc4cd080b74 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "cdv_device.h"
 #include "intel_bios.h"
 #include "power.h"
@@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
        return retval;
 }
 
-#if 0
-/*
- * Set LVDS backlight level by I2C command
- */
-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
-                                       unsigned int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
-       u8 out_buf[2];
-       unsigned int blc_i2c_brightness;
-
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = lvds_i2c_bus->slave_addr,
-                       .flags = 0,
-                       .len = 2,
-                       .buf = out_buf,
-               }
-       };
-
-       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
-                            BRIGHTNESS_MASK /
-                            BRIGHTNESS_MAX_LEVEL);
-
-       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
-               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
-
-       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
-       out_buf[1] = (u8)blc_i2c_brightness;
-
-       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
-               return 0;
-
-       DRM_ERROR("I2C transfer error\n");
-       return -1;
-}
-
-
-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-
-       u32 max_pwm_blc;
-       u32 blc_pwm_duty_cycle;
-
-       max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
-
-       /*BLC_PWM_CTL Should be initiated while backlight device init*/
-       BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
-
-       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-
-       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
-               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-
-       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
-       REG_WRITE(BLC_PWM_CTL,
-                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
-                 (blc_pwm_duty_cycle));
-
-       return 0;
-}
-
-/*
- * Set LVDS backlight level either by I2C or PWM
- */
-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-
-       if (!dev_priv->lvds_bl) {
-               DRM_ERROR("NO LVDS Backlight Info\n");
-               return;
-       }
-
-       if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
-               cdv_lvds_i2c_set_brightness(dev, level);
-       else
-               cdv_lvds_pwm_set_brightness(dev, level);
-}
-#endif
-
 /**
  * Sets the backlight level.
  *
@@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
        .destroy = cdv_intel_lvds_destroy,
 };
 
-
-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
-       .destroy = cdv_intel_lvds_enc_destroy,
-};
-
 /*
  * Enumerate the child dev array parsed from VBT to check whether
  * the LVDS is present.
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
                           &cdv_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder,
-                        &cdv_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
-
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 1d8f67e4795a685f2d720074c0c126e6a8492833..23a78d7553827862d512368cc372c2b88a24b238 100644 (file)
@@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev)
                        break;
                case INTEL_OUTPUT_SDVO:
                        crtc_mask = dev_priv->ops->sdvo_mask;
-                       clone_mask = (1 << INTEL_OUTPUT_SDVO);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_LVDS:
-                       crtc_mask = dev_priv->ops->lvds_mask;
-                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       crtc_mask = dev_priv->ops->lvds_mask;
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_MIPI:
                        crtc_mask = (1 << 0);
-                       clone_mask = (1 << INTEL_OUTPUT_MIPI);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_MIPI2:
                        crtc_mask = (1 << 2);
-                       clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_HDMI:
-                       crtc_mask = dev_priv->ops->hdmi_mask;
+                       crtc_mask = dev_priv->ops->hdmi_mask;
                        clone_mask = (1 << INTEL_OUTPUT_HDMI);
                        break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        crtc_mask = (1 << 0) | (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+                       clone_mask = 0;
                        break;
                case INTEL_OUTPUT_EDP:
                        crtc_mask = (1 << 1);
-                       clone_mask = (1 << INTEL_OUTPUT_EDP);
+                       clone_mask = 0;
                }
                encoder->possible_crtcs = crtc_mask;
                encoder->possible_clones =
index d4c65f26892219209fa9eb4beb5cd3bfecda342a..c976a9dd9240d907b38df1018818cac39f7d62cc 100644 (file)
@@ -27,6 +27,8 @@
 
 #include <linux/delay.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "mdfld_dsi_dpi.h"
 #include "mdfld_dsi_pkg_sender.h"
 #include "mdfld_output.h"
@@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
        /*create drm encoder object*/
        connector = &dsi_connector->base.base;
        encoder = &dpi_output->base.base.base;
-       drm_encoder_init(dev,
-                       encoder,
-                       p_funcs->encoder_funcs,
-                       DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
        drm_encoder_helper_add(encoder,
                                p_funcs->encoder_helper_funcs);
 
@@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
        /*set possible crtcs and clones*/
        if (dsi_connector->pipe) {
                encoder->possible_crtcs = (1 << 2);
-               encoder->possible_clones = (1 << 1);
+               encoder->possible_clones = 0;
        } else {
                encoder->possible_crtcs = (1 << 0);
-               encoder->possible_clones = (1 << 0);
+               encoder->possible_clones = 0;
        }
 
        dsi_connector->base.encoder = &dpi_output->base.base;
index 4fff110c492124e8629cf0c979c61e0dd1c30b72..aae2d358364cc522546e170641b175fe89318f72 100644 (file)
@@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 
        dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
 
-#if 0
-       if (pipe == 1) {
-               if (!gma_power_begin(dev, true))
-                       return 0;
-               android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
-                       x, y, old_fb);
-               goto mrst_crtc_mode_set_exit;
-       }
-#endif
-
        ret = check_fb(crtc->primary->fb);
        if (ret)
                return ret;
@@ -918,14 +908,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
                }
                dpll = 0;
 
-#if 0 /* FIXME revisit later */
-               if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
-                                               ksel == KSEL_BYPASS_25)
-                       dpll &= ~MDFLD_INPUT_REF_SEL;
-               else if (ksel == KSEL_BYPASS_83_100)
-                       dpll |= MDFLD_INPUT_REF_SEL;
-#endif /* FIXME revisit later */
-
                if (is_hdmi)
                        dpll |= MDFLD_VCO_SEL;
 
@@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
                /* compute bitmask from p1 value */
                dpll |= (1 << (clock.p1 - 2)) << 17;
 
-#if 0 /* 1080p30 & 720p */
-               dpll = 0x00050000;
-               fp = 0x000001be;
-#endif
-#if 0 /* 480p */
-               dpll = 0x02010000;
-               fp = 0x000000d2;
-#endif
        } else {
-#if 0 /*DBI_TPO_480x864*/
-               dpll = 0x00020000;
-               fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
                dpll = 0x00800000;
                fp = 0x000000c1;
        }
index ab2b27c0f037bdfa431fa79a55aa7b3c92d4d713..17a944d70add3a4532eee94980bd49e80ed0f567 100644 (file)
@@ -51,7 +51,6 @@ struct panel_info {
 };
 
 struct panel_funcs {
-       const struct drm_encoder_funcs *encoder_funcs;
        const struct drm_encoder_helper_funcs *encoder_helper_funcs;
        struct drm_display_mode * (*get_config_mode)(struct drm_device *);
        int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
index 49c92debb7b25137bf4dab95818de89c8c932bad..25e897b98f8621587b3d482aac668e490047ace4 100644 (file)
@@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs
        .commit = mdfld_dsi_dpi_commit,
 };
 
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tmd_vid_funcs = {
-       .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
        .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
        .get_config_mode = &tmd_vid_get_config_mode,
        .get_panel_info = tmd_vid_get_panel_info,
index a9420bf9a4198ef0f7dcd59f5d8564d4b45b28af..11845978fb0a68efa7679332aa7f534b5e5f845a 100644 (file)
@@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs
        .commit = mdfld_dsi_dpi_commit,
 };
 
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tpo_vid_funcs = {
-       .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
        .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
        .get_config_mode = &tpo_vid_get_config_mode,
        .get_panel_info = tpo_vid_get_panel_info,
index f4370232767d3d70ce7e65f8ae7af2d622f60419..a097a59a9eaec1929487f7661f7d4843b2f06d21 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 
 #include <drm/drm.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
@@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
        .destroy = oaktrail_hdmi_destroy,
 };
 
-static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
-       .destroy = oaktrail_hdmi_enc_destroy,
-};
-
 void oaktrail_hdmi_init(struct drm_device *dev,
                                        struct psb_intel_mode_device *mode_dev)
 {
@@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
                           &oaktrail_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_DVID);
 
-       drm_encoder_init(dev, encoder,
-                        &oaktrail_hdmi_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
 
@@ -673,11 +663,6 @@ failed_connector:
        kfree(gma_encoder);
 }
 
-static const struct pci_device_id hdmi_ids[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
-       { 0 }
-};
-
 void oaktrail_hdmi_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
index 582e095975002de841d032a5f53f364ceebf6bc0..2828360153d16e6e19eaf53841cd42e905c83c13 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <asm/intel-mid.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "intel_bios.h"
 #include "power.h"
 #include "psb_drv.h"
@@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
                           &psb_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 16c6136f778b07c32bc7642e4ad2bb79a0b28e74..fb601983cef0f8e79245b48d3545f2790a9d2790 100644 (file)
@@ -252,7 +252,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
                                        struct drm_property *property,
                                        uint64_t value);
 extern void psb_intel_lvds_destroy(struct drm_connector *connector);
-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
 
 /* intel_gmbus.c */
 extern void gma_intel_i2c_reset(struct drm_device *dev);
index afaebab7bc17f79d6f222b993ef2278b15892c23..063c66bb946d0c21872e0ad72db962d60ed565d5 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_simple_kms_helper.h>
+
 #include "intel_bios.h"
 #include "power.h"
 #include "psb_drv.h"
@@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
        .destroy = psb_intel_lvds_destroy,
 };
 
-
-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
-       .destroy = psb_intel_lvds_enc_destroy,
-};
-
-
-
 /**
  * psb_intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
@@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
                           &psb_intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, encoder,
-                        &psb_intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
 
        gma_connector_attach_encoder(gma_connector, gma_encoder);
        gma_encoder->type = INTEL_OUTPUT_LVDS;
index 264d7ad004b46ffd767d11073d736420fffa25ee..68fb3d7c172b276f63ffcbfaa2e77451ad525755 100644 (file)
@@ -864,36 +864,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd
        DRM_INFO("HDMI is not supported yet");
 
        return false;
-#if 0
-       struct dip_infoframe avi_if = {
-               .type = DIP_TYPE_AVI,
-               .ver = DIP_VERSION_AVI,
-               .len = DIP_LEN_AVI,
-       };
-       uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
-       uint8_t set_buf_index[2] = { 1, 0 };
-       uint64_t *data = (uint64_t *)&avi_if;
-       unsigned i;
-
-       intel_dip_infoframe_csum(&avi_if);
-
-       if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                 SDVO_CMD_SET_HBUF_INDEX,
-                                 set_buf_index, 2))
-               return false;
-
-       for (i = 0; i < sizeof(avi_if); i += 8) {
-               if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                         SDVO_CMD_SET_HBUF_DATA,
-                                         data, 8))
-                       return false;
-               data++;
-       }
-
-       return psb_intel_sdvo_set_value(psb_intel_sdvo,
-                                   SDVO_CMD_SET_HBUF_TXRATE,
-                                   &tx_rate, 1);
-#endif
 }
 
 static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
@@ -1227,75 +1197,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv
        return true;
 }
 
-/* No use! */
-#if 0
-struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
-       struct drm_connector *connector = NULL;
-       struct psb_intel_sdvo *iout = NULL;
-       struct psb_intel_sdvo *sdvo;
-
-       /* find the sdvo connector */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               iout = to_psb_intel_sdvo(connector);
-
-               if (iout->type != INTEL_OUTPUT_SDVO)
-                       continue;
-
-               sdvo = iout->dev_priv;
-
-               if (sdvo->sdvo_reg == SDVOB && sdvoB)
-                       return connector;
-
-               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
-                       return connector;
-
-       }
-
-       return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
-       u8 response[2];
-       u8 status;
-       struct psb_intel_sdvo *psb_intel_sdvo;
-       DRM_DEBUG_KMS("\n");
-
-       if (!connector)
-               return 0;
-
-       psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
-       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
-                                   &response, 2) && response[0];
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
-       u8 response[2];
-       u8 status;
-       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
-       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
-       if (on) {
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-               status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       } else {
-               response[0] = 0;
-               response[1] = 0;
-               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       }
-
-       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-}
-#endif
-
 static bool
 psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
 {
index 9e8224456ea29ac9ff62ba1747c28e118e2a57b9..e5bdd99ad453f4611e98411f03e9f226499044d8 100644 (file)
@@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void)
                return -EINVAL;
        }
 
-       client = i2c_new_device(adapter, &info);
-       if (!client) {
-               pr_err("%s: i2c_new_device() failed\n", __func__);
+       client = i2c_new_client_device(adapter, &info);
+       if (IS_ERR(client)) {
+               pr_err("%s: creating I2C device failed\n", __func__);
                i2c_put_adapter(adapter);
-               return -EINVAL;
+               return PTR_ERR(client);
        }
 
        return 0;
@@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
        .commit = mdfld_dsi_dpi_commit,
 };
 
-static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 const struct panel_funcs mdfld_tc35876x_funcs = {
-       .encoder_funcs = &tc35876x_encoder_funcs,
        .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
        .get_config_mode = tc35876x_get_config_mode,
        .get_panel_info = tc35876x_get_panel_info,
index 55b46a7150a5e1b08983ef73f6beee24478ec282..cc70e836522f05532e12707194012f505c774933 100644 (file)
@@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (state->fb->pitches[0] % 128 != 0) {
+               DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n");
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
        writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
 
        reg = state->fb->width * (state->fb->format->cpp[0]);
-       /* now line_pad is 16 */
-       reg = PADDING(16, reg);
 
-       line_l = state->fb->width * state->fb->format->cpp[0];
-       line_l = PADDING(16, line_l);
+       line_l = state->fb->pitches[0];
        writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
               HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
               priv->mmio + HIBMC_CRT_FB_WIDTH);
index 222356a4f9a84d019d6fa6df5780ac98911fa105..a6fd0c29e5b89cc5c657cb2c81c8b8a165f10ca9 100644 (file)
@@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
        priv->dev->mode_config.max_height = 1200;
 
        priv->dev->mode_config.fb_base = priv->fb_base;
-       priv->dev->mode_config.preferred_depth = 24;
+       priv->dev->mode_config.preferred_depth = 32;
        priv->dev->mode_config.prefer_shadow = 1;
 
        priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev)
        /* reset all the states of crtc/plane/encoder/connector */
        drm_mode_config_reset(dev);
 
-       ret = drm_fbdev_generic_setup(dev, 16);
-       if (ret) {
-               DRM_ERROR("failed to initialize fbdev: %d\n", ret);
-               goto err;
-       }
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
 
        return 0;
 
index 99397ac3b363723d3aa3e4cc579ca438fd9d52c7..322bd542e89d933174966dde854523235b268a53 100644 (file)
@@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
                      struct drm_mode_create_dumb *args)
 {
-       return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+       return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
 }
 
 const struct drm_mode_config_funcs hibmc_mode_funcs = {
index f31068d74b18f3c56763466b013ba583aba7f59f..00e87c2907963ef65e0dda40ffa2d2634d26da38 100644 (file)
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_device.h>
-#include <drm/drm_encoder_slave.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dw_dsi_reg.h"
 
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
        .disable        = dsi_encoder_disable
 };
 
-static const struct drm_encoder_funcs dw_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int dw_drm_encoder_init(struct device *dev,
                               struct drm_device *drm_dev,
                               struct drm_encoder *encoder)
@@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev,
        }
 
        encoder->possible_crtcs = crtc_mask;
-       ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("failed to init dsi encoder\n");
                return ret;
index 86000127d4eec54465ee2e4db8403c6f52cac117..c339e632522a91535c567287c9b1072de6870e4c 100644 (file)
@@ -940,7 +940,6 @@ static struct drm_driver ade_driver = {
 };
 
 struct kirin_drm_data ade_driver_data = {
-       .register_connects = false,
        .num_planes = ADE_CH_NUM,
        .prim_plane = ADE_CH1,
        .channel_formats = channel_formats,
index d3145ae877d74d766925d703326471d12134b17e..4349da3e2379c5b21d5403bd70dffc0aee7f832c 100644 (file)
@@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
        return 0;
 }
 
-static int kirin_drm_connectors_register(struct drm_device *dev)
-{
-       struct drm_connector *connector;
-       struct drm_connector *failed_connector;
-       struct drm_connector_list_iter conn_iter;
-       int ret;
-
-       mutex_lock(&dev->mode_config.mutex);
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       failed_connector = connector;
-                       goto err;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       return 0;
-
-err:
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               if (failed_connector == connector)
-                       break;
-               drm_connector_unregister(connector);
-       }
-       drm_connector_list_iter_end(&conn_iter);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       return ret;
-}
-
 static int kirin_drm_bind(struct device *dev)
 {
        struct kirin_drm_data *driver_data;
@@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev)
 
        drm_fbdev_generic_setup(drm_dev, 32);
 
-       /* connectors should be registered after drm device register */
-       if (driver_data->register_connects) {
-               ret = kirin_drm_connectors_register(drm_dev);
-               if (ret)
-                       goto err_drm_dev_unregister;
-       }
-
        return 0;
 
-err_drm_dev_unregister:
-       drm_dev_unregister(drm_dev);
 err_kms_cleanup:
        kirin_drm_kms_cleanup(drm_dev);
 err_drm_dev_put:
index 4d5c05a240652d865448f3d3e5943a9e2993572a..dee8ec2f7f2ec19247b16b13035e49d52c2cf8fd 100644 (file)
@@ -37,7 +37,6 @@ struct kirin_drm_data {
        u32 channel_formats_cnt;
        int config_max_width;
        int config_max_height;
-       bool register_connects;
        u32 num_planes;
        u32 prim_plane;
 
index a839f78a4c8a3037d8335330bfb86f033bc740a0..741886b5441973b19816a229a4a317dd0c14bf2a 100644 (file)
@@ -393,7 +393,7 @@ sil164_detect_slave(struct i2c_client *client)
                return NULL;
        }
 
-       return i2c_new_device(adap, &info);
+       return i2c_new_client_device(adap, &info);
 }
 
 static int
@@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client,
                    struct drm_encoder_slave *encoder)
 {
        struct sil164_priv *priv;
+       struct i2c_client *slave_client;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client,
        encoder->slave_priv = priv;
        encoder->slave_funcs = &sil164_encoder_funcs;
 
-       priv->duallink_slave = sil164_detect_slave(client);
+       slave_client = sil164_detect_slave(client);
+       if (!IS_ERR(slave_client))
+               priv->duallink_slave = slave_client;
 
        return 0;
 }
index c3332209f27a8dc982eafaaedbbd3dce9ac80935..9517f522dcb9c20b4dadd6b76451b1e9b7d3829d 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/i2c/tda998x.h>
 
 #include <media/cec-notifier.h>
@@ -1132,7 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
        mutex_unlock(&priv->audio_mutex);
 }
 
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+                                     bool enable)
 {
        struct tda998x_priv *priv = dev_get_drvdata(dev);
 
@@ -1949,9 +1951,9 @@ static int tda998x_create(struct device *dev)
        cec_info.platform_data = &priv->cec_glue;
        cec_info.irq = client->irq;
 
-       priv->cec = i2c_new_device(client->adapter, &cec_info);
-       if (!priv->cec) {
-               ret = -ENODEV;
+       priv->cec = i2c_new_client_device(client->adapter, &cec_info);
+       if (IS_ERR(priv->cec)) {
+               ret = PTR_ERR(priv->cec);
                goto fail;
        }
 
@@ -1997,15 +1999,6 @@ err_irq:
 
 /* DRM encoder functions */
 
-static void tda998x_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs tda998x_encoder_funcs = {
-       .destroy = tda998x_encoder_destroy,
-};
-
 static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 {
        struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -2023,8 +2016,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 
        priv->encoder.possible_crtcs = crtcs;
 
-       ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm, &priv->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret)
                goto err_encoder;
 
index 0bfd276c19fe60be6f0b75c533656ad7fa3dcbe2..35bbe2b805962802853f5a543bde56ab120d4239 100644 (file)
@@ -1,3 +1,15 @@
+config DRM_I915_FENCE_TIMEOUT
+       int "Timeout for unsignaled foreign fences (ms, jiffy granularity)"
+       default 10000 # milliseconds
+       help
+         When listening to a foreign fence, we install a supplementary timer
+         to ensure that we are always signaled and our userspace is able to
+         make forward progress. This value specifies the timeout used for an
+         unsignaled foreign fence.
+
+         May be 0 to disable the timeout, and rely on the foreign fence being
+         eventually signaled.
+
 config DRM_I915_USERFAULT_AUTOSUSPEND
        int "Runtime autosuspend delay for userspace GGTT mmaps (ms)"
        default 250 # milliseconds
index 6cd1f6253814c0911a643e7b35dfbe72fca64a6a..b0da6ea6e3f1f45c166c79cdf8e4492df41bbafa 100644 (file)
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
 subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
 # Fine grained warnings disable
@@ -34,6 +35,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
 
 # core driver code
 i915-y += i915_drv.o \
+         i915_config.o \
          i915_irq.o \
          i915_getparam.o \
          i915_params.o \
@@ -86,10 +88,12 @@ gt-y += \
        gt/intel_engine_cs.o \
        gt/intel_engine_heartbeat.o \
        gt/intel_engine_pm.o \
-       gt/intel_engine_pool.o \
        gt/intel_engine_user.o \
        gt/intel_ggtt.o \
+       gt/intel_ggtt_fencing.o \
        gt/intel_gt.o \
+       gt/intel_gt_buffer_pool.o \
+       gt/intel_gt_clock_utils.o \
        gt/intel_gt_irq.o \
        gt/intel_gt_pm.o \
        gt/intel_gt_pm_irq.o \
@@ -108,6 +112,7 @@ gt-y += \
        gt/intel_sseu.o \
        gt/intel_timeline.o \
        gt/intel_workarounds.o \
+       gt/shmem_utils.o \
        gt/sysfs_engines.o
 # autogenerated null render state
 gt-y += \
@@ -150,7 +155,6 @@ i915-y += \
          i915_buddy.o \
          i915_cmd_parser.o \
          i915_gem_evict.o \
-         i915_gem_fence_reg.o \
          i915_gem_gtt.o \
          i915_gem.o \
          i915_globals.o \
@@ -164,14 +168,18 @@ i915-y += \
 
 # general-purpose microcontroller (GuC) support
 i915-y += gt/uc/intel_uc.o \
+         gt/uc/intel_uc_debugfs.o \
          gt/uc/intel_uc_fw.o \
          gt/uc/intel_guc.o \
          gt/uc/intel_guc_ads.o \
          gt/uc/intel_guc_ct.o \
+         gt/uc/intel_guc_debugfs.o \
          gt/uc/intel_guc_fw.o \
          gt/uc/intel_guc_log.o \
+         gt/uc/intel_guc_log_debugfs.o \
          gt/uc/intel_guc_submission.o \
          gt/uc/intel_huc.o \
+         gt/uc/intel_huc_debugfs.o \
          gt/uc/intel_huc_fw.o
 
 # modesetting core code
@@ -240,23 +248,6 @@ i915-y += \
        display/vlv_dsi.o \
        display/vlv_dsi_pll.o
 
-# perf code
-i915-y += \
-       oa/i915_oa_hsw.o \
-       oa/i915_oa_bdw.o \
-       oa/i915_oa_chv.o \
-       oa/i915_oa_sklgt2.o \
-       oa/i915_oa_sklgt3.o \
-       oa/i915_oa_sklgt4.o \
-       oa/i915_oa_bxt.o \
-       oa/i915_oa_kblgt2.o \
-       oa/i915_oa_kblgt3.o \
-       oa/i915_oa_glk.o \
-       oa/i915_oa_cflgt2.o \
-       oa/i915_oa_cflgt3.o \
-       oa/i915_oa_cnl.o \
-       oa/i915_oa_icl.o \
-       oa/i915_oa_tgl.o
 i915-y += i915_perf.o
 
 # Post-mortem debug and GPU hang state capture
@@ -270,7 +261,8 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
        selftests/igt_live_test.o \
        selftests/igt_mmap.o \
        selftests/igt_reset.o \
-       selftests/igt_spinner.o
+       selftests/igt_spinner.o \
+       selftests/librapl.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
index 17cee6f80d8be37d339b7f3555616365fa481ac5..4fec5bd649201d995dcd5e62ce46884190ecb663 100644 (file)
 #include "intel_panel.h"
 #include "intel_vdsc.h"
 
-static inline int header_credits_available(struct drm_i915_private *dev_priv,
-                                          enum transcoder dsi_trans)
+static int header_credits_available(struct drm_i915_private *dev_priv,
+                                   enum transcoder dsi_trans)
 {
        return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
                >> FREE_HEADER_CREDIT_SHIFT;
 }
 
-static inline int payload_credits_available(struct drm_i915_private *dev_priv,
-                                           enum transcoder dsi_trans)
+static int payload_credits_available(struct drm_i915_private *dev_priv,
+                                    enum transcoder dsi_trans)
 {
        return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
                >> FREE_PLOAD_CREDIT_SHIFT;
@@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
 static int dsi_send_pkt_payld(struct intel_dsi_host *host,
                              struct mipi_dsi_packet pkt)
 {
+       struct intel_dsi *intel_dsi = host->intel_dsi;
+       struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
        /* payload queue can accept *256 bytes*, check limit */
        if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
-               DRM_ERROR("payload size exceeds max queue limit\n");
+               drm_err(&i915->drm, "payload size exceeds max queue limit\n");
                return -1;
        }
 
        /* load data into command payload queue */
        if (!add_payld_to_queue(host, pkt.payload,
                                pkt.payload_length)) {
-               DRM_ERROR("adding payload to queue failed\n");
+               drm_err(&i915->drm, "adding payload to queue failed\n");
                return -1;
        }
 
@@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
                                tmp |= VIDEO_MODE_SYNC_PULSE;
                                break;
                        }
+               } else {
+                       /*
+                        * FIXME: Retrieve this info from VBT.
+                        * As per the spec, when the DSI transcoder operates
+                        * in TE GATE mode, TE comes from the GPIO
+                        * which is the UTIL PIN for DSI 0.
+                        * We also assume this GPIO is not used for any
+                        * other purpose.
+                        */
+                       tmp &= ~OP_MODE_MASK;
+                       tmp |= CMD_MODE_TE_GATE;
+                       tmp |= TE_SOURCE_GPIO;
                }
 
                intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
@@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        }
 
        hactive = adjusted_mode->crtc_hdisplay;
-       htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+
+       if (is_vid_mode(intel_dsi))
+               htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+       else
+               htotal = DIV_ROUND_UP((hactive + 160) * mul, div);
+
        hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
        hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
        hsync_size  = hsync_end - hsync_start;
        hback_porch = (adjusted_mode->crtc_htotal -
                       adjusted_mode->crtc_hsync_end);
        vactive = adjusted_mode->crtc_vdisplay;
-       vtotal = adjusted_mode->crtc_vtotal;
+
+       if (is_vid_mode(intel_dsi)) {
+               vtotal = adjusted_mode->crtc_vtotal;
+       } else {
+               int bpp, line_time_us, byte_clk_period_ns;
+
+               if (crtc_state->dsc.compression_enable)
+                       bpp = crtc_state->dsc.compressed_bpp;
+               else
+                       bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+               byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
+               line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
+               vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
+       }
        vsync_start = adjusted_mode->crtc_vsync_start;
        vsync_end = adjusted_mode->crtc_vsync_end;
        vsync_shift = hsync_start - htotal / 2;
@@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        }
 
        /* TRANS_HSYNC register to be programmed only for video mode */
-       if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+       if (is_vid_mode(intel_dsi)) {
                if (intel_dsi->video_mode_format ==
                    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
                        /* BSPEC: hsync size should be atleast 16 pixels */
@@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        if (vsync_start < vactive)
                drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
 
-       /* program TRANS_VSYNC register */
-       for_each_dsi_port(port, intel_dsi->ports) {
-               dsi_trans = dsi_port_to_transcoder(port);
-               intel_de_write(dev_priv, VSYNC(dsi_trans),
-                              (vsync_start - 1) | ((vsync_end - 1) << 16));
+       /* program TRANS_VSYNC register for video mode only */
+       if (is_vid_mode(intel_dsi)) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       dsi_trans = dsi_port_to_transcoder(port);
+                       intel_de_write(dev_priv, VSYNC(dsi_trans),
+                                      (vsync_start - 1) | ((vsync_end - 1) << 16));
+               }
        }
 
        /*
-        * FIXME: It has to be programmed only for interlaced
+        * FIXME: It has to be programmed only for video modes and interlaced
         * modes. Put the check condition here once interlaced
         * info available as described above.
         * program TRANS_VSYNCSHIFT register
         */
-       for_each_dsi_port(port, intel_dsi->ports) {
-               dsi_trans = dsi_port_to_transcoder(port);
-               intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
+       if (is_vid_mode(intel_dsi)) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       dsi_trans = dsi_port_to_transcoder(port);
+                       intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+                                      vsync_shift);
+               }
        }
 
        /* program TRANS_VBLANK register, should be same as vtotal programmed */
@@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
        }
 }
 
+static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+                                     bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       u32 tmp;
+
+       /*
+        * The UTIL pin is used as the TE input for DSI0;
+        * for dual link/DSI1, TE comes from the slave DSI1
+        * through a GPIO.
+        */
+       if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+               return;
+
+       tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+
+       if (enable) {
+               tmp |= UTIL_PIN_DIRECTION_INPUT;
+               tmp |= UTIL_PIN_ENABLE;
+       } else {
+               tmp &= ~UTIL_PIN_ENABLE;
+       }
+       intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+}
+
 static void
 gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
                              const struct intel_crtc_state *crtc_state)
@@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
        /* setup D-PHY timings */
        gen11_dsi_setup_dphy_timings(encoder, crtc_state);
 
+       /* The transcoder is configured to take events from the GPIO */
+       gen11_dsi_config_util_pin(encoder, true);
+
        /* step 4h: setup DSI protocol timeouts */
        gen11_dsi_setup_timeouts(encoder, crtc_state);
 
@@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
        wait_for_cmds_dispatched_to_panel(encoder);
 }
 
-static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
+                                    struct intel_encoder *encoder,
                                     const struct intel_crtc_state *crtc_state,
                                     const struct drm_connector_state *conn_state)
 {
@@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
        gen11_dsi_program_esc_clk_div(encoder, crtc_state);
 }
 
-static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
@@ -1118,13 +1188,14 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
        gen11_dsi_set_transcoder_timings(encoder, pipe_config);
 }
 
-static void gen11_dsi_enable(struct intel_encoder *encoder,
+static void gen11_dsi_enable(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-       WARN_ON(crtc_state->has_pch_encoder);
+       drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
 
        /* step6d: enable dsi transcoder */
        gen11_dsi_enable_transcoder(encoder);
@@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
        enum transcoder dsi_trans;
        u32 tmp;
 
+       /* disable periodic update mode */
+       if (is_cmd_mode(intel_dsi)) {
+               for_each_dsi_port(port, intel_dsi->ports) {
+                       tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+                       tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
+                       intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+               }
+       }
+
        /* put dsi link in ULPS */
        for_each_dsi_port(port, intel_dsi->ports) {
                dsi_trans = dsi_port_to_transcoder(port);
@@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
        }
 }
 
-static void gen11_dsi_disable(struct intel_encoder *encoder,
+static void gen11_dsi_disable(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
@@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
        /* step3: disable port */
        gen11_dsi_disable_port(encoder);
 
+       gen11_dsi_config_util_pin(encoder, false);
+
        /* step4: disable IO power */
        gen11_dsi_disable_io_power(encoder);
 }
 
-static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
                                   const struct intel_crtc_state *old_crtc_state,
                                   const struct drm_connector_state *old_conn_state)
 {
@@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
        adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
 }
 
+static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+{
+       struct drm_device *dev = intel_dsi->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum transcoder dsi_trans;
+       u32 val;
+
+       if (intel_dsi->ports == BIT(PORT_B))
+               dsi_trans = TRANSCODER_DSI_1;
+       else
+               dsi_trans = TRANSCODER_DSI_0;
+
+       val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+       return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+}
+
 static void gen11_dsi_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
@@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
        gen11_dsi_get_timings(encoder, pipe_config);
        pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
        pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+       if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
+               pipe_config->hw.adjusted_mode.private_flags |=
+                                       I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
 }
 
 static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1417,18 +1521,22 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;
-       struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        const struct drm_display_mode *fixed_mode =
-                                       intel_connector->panel.fixed_mode;
+               intel_connector->panel.fixed_mode;
        struct drm_display_mode *adjusted_mode =
-                                       &pipe_config->hw.adjusted_mode;
+               &pipe_config->hw.adjusted_mode;
+       int ret;
 
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        intel_fixed_panel_mode(fixed_mode, adjusted_mode);
-       intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+       ret = intel_pch_panel_fitting(pipe_config, conn_state);
+       if (ret)
+               return ret;
 
        adjusted_mode->flags = 0;
 
@@ -1446,10 +1554,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
        pipe_config->clock_set = true;
 
        if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
-               DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+               drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
 
        pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
 
+       /* We would not operate in periodic command mode */
+       pipe_config->hw.adjusted_mode.private_flags &=
+                                       ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+
+       /*
+        * In case of TE GATE cmd mode, we
+        * receive TE from the slave if
+        * dual link is enabled
+        */
+       if (is_cmd_mode(intel_dsi)) {
+               if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
+                       pipe_config->hw.adjusted_mode.private_flags |=
+                                               I915_MODE_FLAG_DSI_USE_TE1 |
+                                               I915_MODE_FLAG_DSI_USE_TE0;
+               else if (intel_dsi->ports == BIT(PORT_B))
+                       pipe_config->hw.adjusted_mode.private_flags |=
+                                               I915_MODE_FLAG_DSI_USE_TE1;
+               else
+                       pipe_config->hw.adjusted_mode.private_flags |=
+                                               I915_MODE_FLAG_DSI_USE_TE0;
+       }
+
        return 0;
 }
 
index 457b258683d300f5e4a8060c33837cc52f7b1d09..79032701873a2655be539deeb2d43ec4a57a41ba 100644 (file)
@@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
        struct intel_plane_state *plane_state = to_intel_plane_state(state);
-       WARN_ON(plane_state->vma);
+       drm_WARN_ON(plane->dev, plane_state->vma);
 
        __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
        if (plane_state->hw.fb)
@@ -264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
        plane_state->hw.color_range = from_plane_state->uapi.color_range;
 }
 
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+                              struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+       crtc_state->active_planes &= ~BIT(plane->id);
+       crtc_state->nv12_planes &= ~BIT(plane->id);
+       crtc_state->c8_planes &= ~BIT(plane->id);
+       crtc_state->data_rate[plane->id] = 0;
+       crtc_state->min_cdclk[plane->id] = 0;
+
+       plane_state->uapi.visible = false;
+}
+
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
                                        struct intel_crtc_state *new_crtc_state,
                                        const struct intel_plane_state *old_plane_state,
@@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        const struct drm_framebuffer *fb = new_plane_state->hw.fb;
        int ret;
 
-       new_crtc_state->active_planes &= ~BIT(plane->id);
-       new_crtc_state->nv12_planes &= ~BIT(plane->id);
-       new_crtc_state->c8_planes &= ~BIT(plane->id);
-       new_crtc_state->data_rate[plane->id] = 0;
-       new_crtc_state->min_cdclk[plane->id] = 0;
-       new_plane_state->uapi.visible = false;
+       intel_plane_set_invisible(new_crtc_state, new_plane_state);
 
        if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
                return 0;
@@ -387,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
        }
 
        /* should never happen */
-       WARN_ON(1);
+       drm_WARN_ON(state->base.dev, 1);
 
        return NULL;
 }
index a6bbf42bae1f13c7d401ca385dee6df5175ed227..59dd1fbb02eaaa2f313e0a5a6c1824351ac364d2 100644 (file)
@@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
                               struct intel_plane *plane,
                               bool *need_cdclk_calc);
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+                              struct intel_plane_state *plane_state);
 
 #endif /* __INTEL_ATOMIC_PLANE_H__ */
index 62f234f641de60f6caa98cd1ac1fc299adb74102..ad4aa66fd676743b897753492eda3cb73e2cbab7 100644 (file)
@@ -252,14 +252,16 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
                i = ARRAY_SIZE(hdmi_audio_clock);
 
        if (i == ARRAY_SIZE(hdmi_audio_clock)) {
-               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
-                             adjusted_mode->crtc_clock);
+               drm_dbg_kms(&dev_priv->drm,
+                           "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+                           adjusted_mode->crtc_clock);
                i = 1;
        }
 
-       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
-                     hdmi_audio_clock[i].clock,
-                     hdmi_audio_clock[i].config);
+       drm_dbg_kms(&dev_priv->drm,
+                   "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+                   hdmi_audio_clock[i].clock,
+                   hdmi_audio_clock[i].config);
 
        return hdmi_audio_clock[i].config;
 }
@@ -512,6 +514,124 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
        mutex_unlock(&dev_priv->av_mutex);
 }
 
+static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
+                                          const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       unsigned int link_clks_available, link_clks_required;
+       unsigned int tu_data, tu_line, link_clks_active;
+       unsigned int h_active, h_total, hblank_delta, pixel_clk;
+       unsigned int fec_coeff, cdclk, vdsc_bpp;
+       unsigned int link_clk, lanes;
+       unsigned int hblank_rise;
+
+       h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+       h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
+       pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
+       vdsc_bpp = crtc_state->dsc.compressed_bpp;
+       cdclk = i915->cdclk.hw.cdclk;
+       /* fec= 0.972261, using rounding multiplier of 1000000 */
+       fec_coeff = 972261;
+       link_clk = crtc_state->port_clock;
+       lanes = crtc_state->lane_count;
+
+       drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
+                   "lanes = %u vdsc_bpp = %u cdclk = %u\n",
+                   h_active, link_clk, lanes, vdsc_bpp, cdclk);
+
+       if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+               return 0;
+
+       link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
+       link_clks_required = DIV_ROUND_UP(192000 * h_total, 1000 * pixel_clk) * (48 / lanes + 2);
+
+       if (link_clks_available > link_clks_required)
+               hblank_delta = 32;
+       else
+               hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
+                                                 mul_u32_u32(link_clk, cdclk));
+
+       tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
+                           mul_u32_u32(link_clk * lanes, fec_coeff));
+       tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
+                           mul_u32_u32(64 * pixel_clk, 1000000));
+       link_clks_active  = (tu_line - 1) * 64 + tu_data;
+
+       hblank_rise = (link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, 250) + 4) * pixel_clk / link_clk;
+
+       return h_active - hblank_rise + hblank_delta;
+}
+
+static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
+{
+       unsigned int h_active, h_total, pixel_clk;
+       unsigned int link_clk, lanes;
+
+       h_active = crtc_state->hw.adjusted_mode.hdisplay;
+       h_total = crtc_state->hw.adjusted_mode.htotal;
+       pixel_clk = crtc_state->hw.adjusted_mode.clock;
+       link_clk = crtc_state->port_clock;
+       lanes = crtc_state->lane_count;
+
+       return ((h_total - h_active) * link_clk - 12 * pixel_clk) /
+               (pixel_clk * (48 / lanes + 2));
+}
+
+static void enable_audio_dsc_wa(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       enum pipe pipe = crtc->pipe;
+       unsigned int hblank_early_prog, samples_room;
+       unsigned int val;
+
+       if (INTEL_GEN(i915) < 11)
+               return;
+
+       val = intel_de_read(i915, AUD_CONFIG_BE);
+
+       if (INTEL_GEN(i915) == 11)
+               val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+       else if (INTEL_GEN(i915) >= 12)
+               val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+
+       if (crtc_state->dsc.compression_enable &&
+           (crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+           crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
+               /* Get hblank early enable value required */
+               hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
+               if (hblank_early_prog < 32) {
+                       val &= ~HBLANK_START_COUNT_MASK(pipe);
+                       val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+               } else if (hblank_early_prog < 64) {
+                       val &= ~HBLANK_START_COUNT_MASK(pipe);
+                       val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+               } else if (hblank_early_prog < 96) {
+                       val &= ~HBLANK_START_COUNT_MASK(pipe);
+                       val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+               } else {
+                       val &= ~HBLANK_START_COUNT_MASK(pipe);
+                       val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+               }
+
+               /* Get samples room value required */
+               samples_room = calc_samples_room(crtc_state);
+               if (samples_room < 3) {
+                       val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+                       val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+               } else {
+                       /* Program 0 i.e "All Samples available in buffer" */
+                       val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+                       val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+               }
+       }
+
+       intel_de_write(i915, AUD_CONFIG_BE, val);
+}
+
+#undef ROUNDING_FACTOR
+
 static void hsw_audio_codec_enable(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state)
@@ -529,6 +649,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
 
        mutex_lock(&dev_priv->av_mutex);
 
+       /* Enable Audio WA for 4k DSC usecases */
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+               enable_audio_dsc_wa(encoder, crtc_state);
+
        /* Enable audio presence detect, invalidate ELD */
        tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
        tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
@@ -891,7 +1015,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
        ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 
        if (dev_priv->audio_power_refcount++ == 0) {
-               if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+               if (INTEL_GEN(dev_priv) >= 9) {
                        intel_de_write(dev_priv, AUD_FREQ_CNTRL,
                                       dev_priv->audio_freq_cntrl);
                        drm_dbg_kms(&dev_priv->drm,
@@ -931,7 +1055,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
        unsigned long cookie;
        u32 tmp;
 
-       if (!IS_GEN(dev_priv, 9))
+       if (INTEL_GEN(dev_priv) < 9)
                return;
 
        cookie = i915_audio_component_get_power(kdev);
@@ -1136,6 +1260,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
        drm_modeset_unlock_all(&dev_priv->drm);
 
        device_link_remove(hda_kdev, i915_kdev);
+
+       if (dev_priv->audio_power_refcount)
+               drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
+                       dev_priv->audio_power_refcount);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
@@ -1173,7 +1301,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
                return;
        }
 
-       if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
                                                           AUD_FREQ_CNTRL);
                drm_dbg_kms(&dev_priv->drm,
index 58b264bc318d9fdb9e57326e40c1bf9b931e645b..fef04e2d954ed9c6f3aef03d81167331a992c174 100644 (file)
@@ -8,6 +8,9 @@
 #include "intel_bw.h"
 #include "intel_display_types.h"
 #include "intel_sideband.h"
+#include "intel_atomic.h"
+#include "intel_pm.h"
+
 
 /* Parameters for Qclk Geyserville (QGV) */
 struct intel_qgv_point {
@@ -113,6 +116,26 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+                                 u32 points_mask)
+{
+       int ret;
+
+       /* bspec says to keep retrying for at least 1 ms */
+       ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+                               points_mask,
+                               ICL_PCODE_POINTS_RESTRICTED_MASK,
+                               ICL_PCODE_POINTS_RESTRICTED,
+                               1);
+
+       if (ret < 0) {
+               drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
                              struct intel_qgv_info *qi)
 {
@@ -240,6 +263,16 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
                        break;
        }
 
+       /*
+        * If SAGV is disabled in BIOS, we always get 1 SAGV
+        * point, but we can't send PCode commands to restrict it
+        * as they would fail and be pointless anyway.
+        */
+       if (qi.num_points == 1)
+               dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+       else
+               dev_priv->sagv_status = I915_SAGV_ENABLED;
+
        return 0;
 }
 
@@ -248,6 +281,11 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 {
        int i;
 
+       /*
+        * Let's return max bw for 0 planes
+        */
+       num_planes = max(1, num_planes);
+
        for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
                const struct intel_bw_info *bi =
                        &dev_priv->max_bw[i];
@@ -277,34 +315,6 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
                icl_get_bw_info(dev_priv, &icl_sa_info);
 }
 
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
-                                       int num_planes)
-{
-       if (INTEL_GEN(dev_priv) >= 11) {
-               /*
-                * Any bw group has same amount of QGV points
-                */
-               const struct intel_bw_info *bi =
-                       &dev_priv->max_bw[0];
-               unsigned int min_bw = UINT_MAX;
-               int i;
-
-               /*
-                * FIXME with SAGV disabled maybe we can assume
-                * point 1 will always be used? Seems to match
-                * the behaviour observed in the wild.
-                */
-               for (i = 0; i < bi->num_qgv_points; i++) {
-                       unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
-
-                       min_bw = min(bw, min_bw);
-               }
-               return min_bw;
-       } else {
-               return UINT_MAX;
-       }
-}
-
 static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
 {
        /*
@@ -338,16 +348,17 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
                          const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 
        bw_state->data_rate[crtc->pipe] =
                intel_bw_crtc_data_rate(crtc_state);
        bw_state->num_active_planes[crtc->pipe] =
                intel_bw_crtc_num_active_planes(crtc_state);
 
-       DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
-                     pipe_name(crtc->pipe),
-                     bw_state->data_rate[crtc->pipe],
-                     bw_state->num_active_planes[crtc->pipe]);
+       drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+                   pipe_name(crtc->pipe),
+                   bw_state->data_rate[crtc->pipe],
+                   bw_state->num_active_planes[crtc->pipe]);
 }
 
 static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
@@ -374,7 +385,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
        return data_rate;
 }
 
-static struct intel_bw_state *
+/*
+ * intel_atomic_get_old_bw_state - fetch the pre-commit bandwidth state
+ * @state: the atomic state being checked/committed
+ *
+ * Looks up the old (pre-commit) global object state of dev_priv->bw_obj
+ * and downcasts it to struct intel_bw_state.
+ */
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_global_state *bw_state;
+
+       bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+
+       return to_intel_bw_state(bw_state);
+}
+
+/*
+ * intel_atomic_get_new_bw_state - fetch the post-commit bandwidth state
+ * @state: the atomic state being checked/committed
+ *
+ * Looks up the new (post-commit) global object state of dev_priv->bw_obj
+ * and downcasts it to struct intel_bw_state.
+ */
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_global_state *bw_state;
+
+       bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+
+       return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
 intel_atomic_get_bw_state(struct intel_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -391,11 +424,16 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
-       struct intel_bw_state *bw_state = NULL;
-       unsigned int data_rate, max_data_rate;
+       struct intel_bw_state *new_bw_state = NULL;
+       const struct intel_bw_state *old_bw_state = NULL;
+       unsigned int data_rate;
        unsigned int num_active_planes;
        struct intel_crtc *crtc;
        int i, ret;
+       u32 allowed_points = 0;
+       unsigned int max_bw_point = 0, max_bw = 0;
+       unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+       u32 mask = (1 << num_qgv_points) - 1;
 
        /* FIXME earlier gens need some checks too */
        if (INTEL_GEN(dev_priv) < 11)
@@ -420,41 +458,93 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
                    old_active_planes == new_active_planes)
                        continue;
 
-               bw_state  = intel_atomic_get_bw_state(state);
-               if (IS_ERR(bw_state))
-                       return PTR_ERR(bw_state);
+               new_bw_state = intel_atomic_get_bw_state(state);
+               if (IS_ERR(new_bw_state))
+                       return PTR_ERR(new_bw_state);
 
-               bw_state->data_rate[crtc->pipe] = new_data_rate;
-               bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+               new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+               new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
 
                drm_dbg_kms(&dev_priv->drm,
                            "pipe %c data rate %u num active planes %u\n",
                            pipe_name(crtc->pipe),
-                           bw_state->data_rate[crtc->pipe],
-                           bw_state->num_active_planes[crtc->pipe]);
+                           new_bw_state->data_rate[crtc->pipe],
+                           new_bw_state->num_active_planes[crtc->pipe]);
        }
 
-       if (!bw_state)
+       if (!new_bw_state)
                return 0;
 
-       ret = intel_atomic_lock_global_state(&bw_state->base);
+       ret = intel_atomic_lock_global_state(&new_bw_state->base);
        if (ret)
                return ret;
 
-       data_rate = intel_bw_data_rate(dev_priv, bw_state);
-       num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+       data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
+       data_rate = DIV_ROUND_UP(data_rate, 1000);
 
-       max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+       num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
 
-       data_rate = DIV_ROUND_UP(data_rate, 1000);
+       for (i = 0; i < num_qgv_points; i++) {
+               unsigned int max_data_rate;
 
-       if (data_rate > max_data_rate) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
-                           data_rate, max_data_rate, num_active_planes);
+               max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+               /*
+                * We need to know which qgv point gives us
+                * maximum bandwidth in order to disable SAGV
+                * if we find that we exceed SAGV block time
+                * with watermarks. By that moment we already
+                * have those, as they are calculated earlier in
+                * intel_atomic_check.
+                */
+               if (max_data_rate > max_bw) {
+                       max_bw_point = i;
+                       max_bw = max_data_rate;
+               }
+               if (max_data_rate >= data_rate)
+                       allowed_points |= BIT(i);
+               drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
+                           i, max_data_rate, data_rate);
+       }
+
+       /*
+        * BSpec states that we should always have at least one allowed point
+        * left, so if we don't - simply reject the configuration for obvious
+        * reasons.
+        */
+       if (allowed_points == 0) {
+               drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
+                           " bandwidth %d for display configuration(%d active planes).\n",
+                           data_rate, num_active_planes);
                return -EINVAL;
        }
 
+       /*
+        * Leave only single point with highest bandwidth, if
+        * we can't enable SAGV due to the increased memory latency it may
+        * cause.
+        */
+       if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
+               allowed_points = BIT(max_bw_point);
+               drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
+                           max_bw_point);
+       }
+       /*
+        * We store the ones which need to be masked as that is what PCode
+        * actually accepts as a parameter.
+        */
+       new_bw_state->qgv_points_mask = ~allowed_points & mask;
+
+       old_bw_state = intel_atomic_get_old_bw_state(state);
+       /*
+        * If the actual mask had changed we need to make sure that
+        * the commits are serialized(in case this is a nomodeset, nonblocking)
+        */
+       if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
index a8aa7624c5aaf3db7030cc3ad47b0c124a6a7ade..bbcaaa73ec1bf803c4e9c99e4e27f50d5038959c 100644 (file)
@@ -18,16 +18,43 @@ struct intel_crtc_state;
 struct intel_bw_state {
        struct intel_global_state base;
 
+       /*
+        * Contains a bit mask used to determine whether the
+        * corresponding pipe allows SAGV or not.
+        */
+       u8 pipe_sagv_reject;
+
+       /*
+        * Current QGV points mask, which restricts
+        * some particular SAGV states; not to be confused
+        * with pipe_sagv_reject.
+        */
+       u8 qgv_points_mask;
+
        unsigned int data_rate[I915_MAX_PIPES];
        u8 num_active_planes[I915_MAX_PIPES];
+
+       /* bitmask of active pipes */
+       u8 active_pipes;
 };
 
 #define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
 
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state);
+
 void intel_bw_init_hw(struct drm_i915_private *dev_priv);
 int intel_bw_init(struct drm_i915_private *dev_priv);
 int intel_bw_atomic_check(struct intel_atomic_state *state);
 void intel_bw_crtc_update(struct intel_bw_state *bw_state,
                          const struct intel_crtc_state *crtc_state);
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+                                 u32 points_mask);
 
 #endif /* __INTEL_BW_H__ */
index c1cce93a1c257482c1a0cbe7c9b86b4b9425ea17..98ece9cd7cddb2868d7c81187037ef80c262e9ff 100644 (file)
@@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
        entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
 }
 
+/*
+ * Reassemble one multi-segment palette entry from the two consecutive
+ * data-register words: for each channel the value is
+ * (UDW field << 6) | LDW field.
+ */
+static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+       entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
+                                  REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
+       entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
+                                    REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
+       entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
+                                   REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
+}
+
 static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
        struct intel_dsb *dsb = intel_dsb_get(crtc);
        enum pipe pipe = crtc->pipe;
 
-       /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+       /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
@@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
        }
 }
 
+/*
+ * Return the gamma LUT bit precision in effect for the programmed gamma
+ * mode (gen11+ path).  Returns 0 when the post-CSC gamma stage is
+ * disabled or the mode is unrecognized.
+ */
+static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+       if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+               return 0;
+
+       switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+       case GAMMA_MODE_MODE_8BIT:
+               return 8;
+       case GAMMA_MODE_MODE_10BIT:
+               return 10;
+       case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+               /* multi-segment entries are treated as full 16 bit values */
+               return 16;
+       default:
+               MISSING_CASE(crtc_state->gamma_mode);
+               return 0;
+       }
+}
+
 int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
                else
                        return i9xx_gamma_precision(crtc_state);
        } else {
-               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+               if (INTEL_GEN(dev_priv) >= 11)
+                       return icl_gamma_precision(crtc_state);
+               else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
                        return glk_gamma_precision(crtc_state);
                else if (IS_IRONLAKE(dev_priv))
                        return ilk_gamma_precision(crtc_state);
@@ -1658,9 +1688,9 @@ static bool err_check(struct drm_color_lut *lut1,
                ((abs((long)lut2->green - lut1->green)) <= err);
 }
 
-static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
-                                       struct drm_color_lut *lut2,
-                                       int lut_size, u32 err)
+static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
+                                         struct drm_color_lut *lut2,
+                                         int lut_size, u32 err)
 {
        int i;
 
@@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
        lut_size2 = drm_color_lut_size(blob2);
 
        /* check sw and hw lut size */
-       switch (gamma_mode) {
-       case GAMMA_MODE_MODE_8BIT:
-       case GAMMA_MODE_MODE_10BIT:
-               if (lut_size1 != lut_size2)
-                       return false;
-               break;
-       default:
-               MISSING_CASE(gamma_mode);
-                       return false;
-       }
+       if (lut_size1 != lut_size2)
+               return false;
 
        lut1 = blob1->data;
        lut2 = blob2->data;
@@ -1707,11 +1729,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
        err = 0xffff >> bit_precision;
 
        /* check sw and hw lut entry to be equal */
-       switch (gamma_mode) {
+       switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
        case GAMMA_MODE_MODE_8BIT:
        case GAMMA_MODE_MODE_10BIT:
-               if (!intel_color_lut_entry_equal(lut1, lut2,
-                                                lut_size2, err))
+               if (!intel_color_lut_entries_equal(lut1, lut2,
+                                                  lut_size2, err))
+                       return false;
+               break;
+       case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+               if (!intel_color_lut_entries_equal(lut1, lut2,
+                                                  9, err))
                        return false;
                break;
        default:
@@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
                crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
 }
 
+/*
+ * Read the multi-segment gamma LUT back from the hardware into a newly
+ * allocated property blob.  Returns NULL if the blob can't be created.
+ * Only the first 9 entries (super fine segment) are read back - see the
+ * FIXME below.
+ */
+static struct drm_property_blob *
+icl_read_lut_multi_segment(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       enum pipe pipe = crtc->pipe;
+       struct drm_property_blob *blob;
+       struct drm_color_lut *lut;
+
+       blob = drm_property_create_blob(&dev_priv->drm,
+                                       sizeof(struct drm_color_lut) * lut_size,
+                                       NULL);
+       if (IS_ERR(blob))
+               return NULL;
+
+       lut = blob->data;
+
+       /* auto-increment so consecutive DATA reads walk the LUT */
+       intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+                      PAL_PREC_AUTO_INCREMENT);
+
+       for (i = 0; i < 9; i++) {
+               /* each entry occupies two consecutive DATA words: low, then high */
+               u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+               u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+
+               icl_lut_multi_seg_pack(&lut[i], ldw, udw);
+       }
+
+       intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+
+       /*
+        * FIXME readouts from PAL_PREC_DATA register aren't giving
+        * correct values in the case of fine and coarse segments.
+        * Restricting readouts only for super fine segment as of now.
+        */
+
+       return blob;
+}
+
+/*
+ * Read back the pipe gamma LUT into crtc_state->hw.gamma_lut,
+ * dispatching on the programmed gamma mode.  Does nothing when the
+ * post-CSC gamma stage is disabled.
+ */
+static void icl_read_luts(struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+       if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+               return;
+
+       switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+       case GAMMA_MODE_MODE_8BIT:
+               crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+               break;
+       case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+               crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+               break;
+       default:
+               /* 10 bit modes fall back to the GLK-style readout */
+               crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+       }
+}
+
 void intel_color_init(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc)
 
                if (INTEL_GEN(dev_priv) >= 11) {
                        dev_priv->display.load_luts = icl_load_luts;
+                       dev_priv->display.read_luts = icl_read_luts;
                } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
                        dev_priv->display.load_luts = glk_load_luts;
                        dev_priv->display.read_luts = glk_read_luts;
index 903e49659f561d289852c1b9f4094ab9cbdef37e..406e96785c763760cf422f9cbcb72cf5c8a721e2 100644 (file)
@@ -33,6 +33,7 @@
 
 #include "i915_drv.h"
 #include "intel_connector.h"
+#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_hdcp.h"
 
@@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector)
                goto err_backlight;
        }
 
+       intel_connector_debugfs_add(connector);
+
        return 0;
 
 err_backlight:
@@ -290,7 +293,7 @@ intel_attach_colorspace_property(struct drm_connector *connector)
                        return;
                break;
        default:
-               DRM_DEBUG_KMS("Colorspace property not supported\n");
+               MISSING_CASE(connector->connector_type);
                return;
        }
 
index 78f9b6cde810148dbef367eff5693eb7646531d7..2f5b9a4baafdb9a5aac5c7724597488afd6eeefa 100644 (file)
@@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
        intel_de_write(dev_priv, crt->adpa_reg, adpa);
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder,
+static void intel_disable_crt(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
        intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
 }
 
-static void pch_disable_crt(struct intel_encoder *encoder,
+static void pch_disable_crt(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *old_crtc_state,
                            const struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_crt(struct intel_encoder *encoder,
+static void pch_post_disable_crt(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_crt(encoder, old_crtc_state, old_conn_state);
+       intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void hsw_disable_crt(struct intel_encoder *encoder,
+static void hsw_disable_crt(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *old_crtc_state,
                            const struct drm_connector_state *old_conn_state)
 {
@@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
 
-static void hsw_post_disable_crt(struct intel_encoder *encoder,
+static void hsw_post_disable_crt(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
 {
@@ -250,19 +255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 
        intel_ddi_disable_pipe_clock(old_crtc_state);
 
-       pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+       pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
 
        lpt_disable_pch_transcoder(dev_priv);
        lpt_disable_iclkip(dev_priv);
 
-       intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+       intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
 
        drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
 
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
 
-static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state)
 {
@@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
 
-static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+                              struct intel_encoder *encoder,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
 {
@@ -287,10 +294,11 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
 
        hsw_fdi_link_train(encoder, crtc_state);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       intel_ddi_enable_pipe_clock(encoder, crtc_state);
 }
 
-static void hsw_enable_crt(struct intel_encoder *encoder,
+static void hsw_enable_crt(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
                           const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state)
 {
@@ -300,6 +308,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
 
        drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
 
+       intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
        intel_enable_pipe(crtc_state);
 
        lpt_pch_enable(crtc_state);
@@ -314,7 +324,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
 
-static void intel_enable_crt(struct intel_encoder *encoder,
+static void intel_enable_crt(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
 {
@@ -594,7 +605,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
        edid = drm_get_edid(connector, i2c);
 
        if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
-               DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+               drm_dbg_kms(connector->dev,
+                           "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
                intel_gmbus_force_bit(i2c, true);
                edid = drm_get_edid(connector, i2c);
                intel_gmbus_force_bit(i2c, false);
index 52db7852827bfddc8c2b732fd00c8e5aedb09ee8..aa22465bb56e755be0cdcfcdfe3cab3b300aa019 100644 (file)
@@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
        { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   850      3.0   */
 };
 
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
                                                /* NT mV Trans mV db    */
        { 0xA, 0x33, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
        { 0xA, 0x47, 0x36, 0x00, 0x09 },        /* 350   500      3.1   */
@@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[]
 };
 
 struct icl_mg_phy_ddi_buf_trans {
-       u32 cri_txdeemph_override_5_0;
        u32 cri_txdeemph_override_11_6;
+       u32 cri_txdeemph_override_5_0;
        u32 cri_txdeemph_override_17_12;
 };
 
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+                               /* Voltage swing  pre-emphasis */
+       { 0x18, 0x00, 0x00 },   /* 0              0   */
+       { 0x1D, 0x00, 0x05 },   /* 0              1   */
+       { 0x24, 0x00, 0x0C },   /* 0              2   */
+       { 0x2B, 0x00, 0x14 },   /* 0              3   */
+       { 0x21, 0x00, 0x00 },   /* 1              0   */
+       { 0x2B, 0x00, 0x08 },   /* 1              1   */
+       { 0x30, 0x00, 0x0F },   /* 1              2   */
+       { 0x31, 0x00, 0x03 },   /* 2              0   */
+       { 0x34, 0x00, 0x0B },   /* 2              1   */
+       { 0x3F, 0x00, 0x00 },   /* 3              0   */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
                                /* Voltage swing  pre-emphasis */
-       { 0x0, 0x1B, 0x00 },    /* 0              0   */
-       { 0x0, 0x23, 0x08 },    /* 0              1   */
-       { 0x0, 0x2D, 0x12 },    /* 0              2   */
-       { 0x0, 0x00, 0x00 },    /* 0              3   */
-       { 0x0, 0x23, 0x00 },    /* 1              0   */
-       { 0x0, 0x2B, 0x09 },    /* 1              1   */
-       { 0x0, 0x2E, 0x11 },    /* 1              2   */
-       { 0x0, 0x2F, 0x00 },    /* 2              0   */
-       { 0x0, 0x33, 0x0C },    /* 2              1   */
-       { 0x0, 0x00, 0x00 },    /* 3              0   */
+       { 0x18, 0x00, 0x00 },   /* 0              0   */
+       { 0x1D, 0x00, 0x05 },   /* 0              1   */
+       { 0x24, 0x00, 0x0C },   /* 0              2   */
+       { 0x2B, 0x00, 0x14 },   /* 0              3   */
+       { 0x26, 0x00, 0x00 },   /* 1              0   */
+       { 0x2C, 0x00, 0x07 },   /* 1              1   */
+       { 0x33, 0x00, 0x0C },   /* 1              2   */
+       { 0x2E, 0x00, 0x00 },   /* 2              0   */
+       { 0x36, 0x00, 0x09 },   /* 2              1   */
+       { 0x3F, 0x00, 0x00 },   /* 3              0   */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+                               /* HDMI Preset  VS      Pre-emph */
+       { 0x1A, 0x0, 0x0 },     /* 1            400mV   0dB */
+       { 0x20, 0x0, 0x0 },     /* 2            500mV   0dB */
+       { 0x29, 0x0, 0x0 },     /* 3            650mV   0dB */
+       { 0x32, 0x0, 0x0 },     /* 4            800mV   0dB */
+       { 0x3F, 0x0, 0x0 },     /* 5            1000mV  0dB */
+       { 0x3A, 0x0, 0x5 },     /* 6            Full    -1.5 dB */
+       { 0x39, 0x0, 0x6 },     /* 7            Full    -1.8 dB */
+       { 0x38, 0x0, 0x7 },     /* 8            Full    -2 dB */
+       { 0x37, 0x0, 0x8 },     /* 9            Full    -2.5 dB */
+       { 0x36, 0x0, 0x9 },     /* 10           Full    -3 dB */
 };
 
 struct tgl_dkl_phy_ddi_buf_trans {
@@ -943,14 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
        return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
+/*
+ * Select the MG PHY buffer translation table for the given output type
+ * and link rate: a dedicated table for HDMI, the HBR2/HBR3 table for
+ * rates above 270000, and the RBR/HBR table otherwise.  The chosen
+ * table's size is returned via @n_entries.
+ */
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+                    int *n_entries)
+{
+       if (type == INTEL_OUTPUT_HDMI) {
+               *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+               return icl_mg_phy_ddi_translations_hdmi;
+       } else if (rate > 270000) {
+               *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+               return icl_mg_phy_ddi_translations_hbr2_hbr3;
+       }
+
+       *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+       return icl_mg_phy_ddi_translations_rbr_hbr;
+}
+
 static const struct cnl_ddi_buf_trans *
 ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
                        int *n_entries)
 {
-       if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
-           rate > 270000) {
-               *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
-               return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+       if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+               *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+               return ehl_combo_phy_ddi_translations_dp;
        }
 
        return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
@@ -989,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
                        icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
                                                0, &n_entries);
                else
-                       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+                       icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+                                            &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_CANNONLAKE(dev_priv)) {
                cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -1103,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
                if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
                        return;
        }
-       DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+       drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
+               port_name(port));
 }
 
 static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1216,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
        for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
                /* Configure DP_TP_CTL with auto-training */
                intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
-                              DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
+                              DP_TP_CTL_FDI_AUTOTRAIN |
+                              DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+                              DP_TP_CTL_LINK_TRAIN_PAT1 |
+                              DP_TP_CTL_ENABLE);
 
                /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
                 * DDI E does not support port reversal, the functionality is
@@ -1250,7 +1298,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 
                temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
                if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
-                       DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "FDI link training done on step %d\n", i);
                        break;
                }
 
@@ -1259,7 +1308,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
                 * Results in less fireworks from the state checker.
                 */
                if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
-                       DRM_ERROR("FDI link training failed!\n");
+                       drm_err(&dev_priv->drm, "FDI link training failed!\n");
                        break;
                }
 
@@ -1291,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 
        /* Enable normal pixel sending for FDI */
        intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
-                      DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
+                      DP_TP_CTL_FDI_AUTOTRAIN |
+                      DP_TP_CTL_LINK_TRAIN_NORMAL |
+                      DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+                      DP_TP_CTL_ENABLE);
 }
 
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1305,27 +1357,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
        intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 }
 
-static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder, *ret = NULL;
-       int num_encoders = 0;
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-               ret = encoder;
-               num_encoders++;
-       }
-
-       if (num_encoders != 1)
-               drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
-                        num_encoders,
-                        pipe_name(crtc->pipe));
-
-       BUG_ON(ret == NULL);
-       return ret;
-}
-
 static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
                                 enum port port)
 {
@@ -1451,6 +1482,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
        intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
 }
 
+static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
+{
+       if (master_transcoder == TRANSCODER_EDP)
+               return 0;
+       else
+               return master_transcoder + 1;
+}
+
 /*
  * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
  *
@@ -1458,10 +1497,10 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
  * intel_ddi_config_transcoder_func().
  */
 static u32
-intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
+intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1551,20 +1590,46 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
                temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
        }
 
+       if (IS_GEN_RANGE(dev_priv, 8, 10) &&
+           crtc_state->master_transcoder != INVALID_TRANSCODER) {
+               u8 master_select =
+                       bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
+
+               temp |= TRANS_DDI_PORT_SYNC_ENABLE |
+                       TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
+       }
+
        return temp;
 }
 
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       u32 temp;
+       u32 ctl;
+
+       if (INTEL_GEN(dev_priv) >= 11) {
+               enum transcoder master_transcoder = crtc_state->master_transcoder;
+               u32 ctl2 = 0;
+
+               if (master_transcoder != INVALID_TRANSCODER) {
+                       u8 master_select =
+                               bdw_trans_port_sync_master_select(master_transcoder);
+
+                       ctl2 |= PORT_SYNC_MODE_ENABLE |
+                               PORT_SYNC_MODE_MASTER_SELECT(master_select);
+               }
 
-       temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+               intel_de_write(dev_priv,
+                              TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
+       }
+
+       ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
-               temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+               ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
 }
 
 /*
@@ -1572,16 +1637,17 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
  * bit.
  */
 static void
-intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       u32 temp;
+       u32 ctl;
 
-       temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
-       temp &= ~TRANS_DDI_FUNC_ENABLE;
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+       ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
+       ctl &= ~TRANS_DDI_FUNC_ENABLE;
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
 }
 
 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1589,24 +1655,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       u32 val;
+       u32 ctl;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               intel_de_write(dev_priv,
+                              TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
 
-       val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
-       val &= ~TRANS_DDI_FUNC_ENABLE;
+       ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+       ctl &= ~TRANS_DDI_FUNC_ENABLE;
+
+       if (IS_GEN_RANGE(dev_priv, 8, 10))
+               ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
+                        TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
 
        if (INTEL_GEN(dev_priv) >= 12) {
                if (!intel_dp_mst_is_master_trans(crtc_state)) {
-                       val &= ~(TGL_TRANS_DDI_PORT_MASK |
+                       ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
                                 TRANS_DDI_MODE_SELECT_MASK);
                }
        } else {
-               val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
+               ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
        }
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
 
        if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Quirk Increase DDI disabled time\n");
                /* Quirk time at 100ms for reliable operation */
                msleep(100);
        }
@@ -1667,7 +1744,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
                goto out;
        }
 
-       if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+       if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
                cpu_transcoder = TRANSCODER_EDP;
        else
                cpu_transcoder = (enum transcoder) pipe;
@@ -1729,7 +1806,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
        if (!(tmp & DDI_BUF_CTL_ENABLE))
                goto out;
 
-       if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
+       if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
                tmp = intel_de_read(dev_priv,
                                    TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
@@ -1787,20 +1864,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
        }
 
        if (!*pipe_mask)
-               DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
-                             encoder->base.base.id, encoder->base.name);
+               drm_dbg_kms(&dev_priv->drm,
+                           "No pipe for [ENCODER:%d:%s] found\n",
+                           encoder->base.base.id, encoder->base.name);
 
        if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
-               DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
-                             encoder->base.base.id, encoder->base.name,
-                             *pipe_mask);
+               drm_dbg_kms(&dev_priv->drm,
+                           "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+                           encoder->base.base.id, encoder->base.name,
+                           *pipe_mask);
                *pipe_mask = BIT(ffs(*pipe_mask) - 1);
        }
 
        if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
-               DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
-                             encoder->base.base.id, encoder->base.name,
-                             *pipe_mask, mst_pipe_mask);
+               drm_dbg_kms(&dev_priv->drm,
+                           "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+                           encoder->base.base.id, encoder->base.name,
+                           *pipe_mask, mst_pipe_mask);
        else
                *is_dp_mst = mst_pipe_mask;
 
@@ -1810,9 +1890,9 @@ out:
                if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
-                       DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
-                                 "(PHY_CTL %08x)\n", encoder->base.base.id,
-                                 encoder->base.name, tmp);
+                       drm_err(&dev_priv->drm,
+                               "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+                               encoder->base.base.id, encoder->base.name, tmp);
        }
 
        intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -1834,7 +1914,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        return true;
 }
 
-static inline enum intel_display_power_domain
+static enum intel_display_power_domain
 intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
        /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -1893,11 +1973,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
                                        intel_dsc_power_domain(crtc_state));
 }
 
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
        enum port port = encoder->port;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
@@ -1978,7 +2058,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 
        /* Make sure that the requested I_boost is valid */
        if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
-               DRM_ERROR("Invalid I_boost value %u\n", iboost);
+               drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
                return;
        }
 
@@ -2037,7 +2117,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
                        icl_get_combo_buf_trans(dev_priv, encoder->type,
                                                intel_dp->link_rate, &n_entries);
                else
-                       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+                       icl_get_mg_buf_trans(dev_priv, encoder->type,
+                                            intel_dp->link_rate, &n_entries);
        } else if (IS_CANNONLAKE(dev_priv)) {
                if (encoder->type == INTEL_OUTPUT_EDP)
                        cnl_get_buf_trans_edp(dev_priv, &n_entries);
@@ -2237,7 +2318,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                return;
 
        if (level >= n_entries) {
-               DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+               drm_dbg_kms(&dev_priv->drm,
+                           "DDI translation not found for level %d. Using %d instead.",
+                           level, n_entries - 1);
                level = n_entries - 1;
        }
 
@@ -2350,21 +2433,28 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
-                                          int link_clock,
-                                          u32 level)
+                                          int link_clock, u32 level,
+                                          enum intel_output_type type)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
        const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
        u32 n_entries, val;
-       int ln;
+       int ln, rate = 0;
+
+       if (type != INTEL_OUTPUT_HDMI) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               rate = intel_dp->link_rate;
+       }
 
-       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
-       ddi_translations = icl_mg_phy_ddi_translations;
+       ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+                                               &n_entries);
        /* The table does not have values for level 3 and level 9. */
        if (level >= n_entries || level == 3 || level == 9) {
-               DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
-                             level, n_entries - 2);
+               drm_dbg_kms(&dev_priv->drm,
+                           "DDI translation not found for level %d. Using %d instead.",
+                           level, n_entries - 2);
                level = n_entries - 2;
        }
 
@@ -2483,7 +2573,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
        if (intel_phy_is_combo(dev_priv, phy))
                icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
        else
-               icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
+               icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level,
+                                              type);
 }
 
 static void
@@ -2550,8 +2641,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
                tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
 }
 
-static u32 translate_signal_level(int signal_levels)
+static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int i;
 
        for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -2559,8 +2651,9 @@ static u32 translate_signal_level(int signal_levels)
                        return i;
        }
 
-       WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
-            signal_levels);
+       drm_WARN(&i915->drm, 1,
+                "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+                signal_levels);
 
        return 0;
 }
@@ -2571,46 +2664,73 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
 
-       return translate_signal_level(signal_levels);
+       return translate_signal_level(intel_dp, signal_levels);
 }
 
-u32 bxt_signal_levels(struct intel_dp *intel_dp)
+static void
+tgl_set_signal_levels(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-       struct intel_encoder *encoder = &dport->base;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
 
-       if (INTEL_GEN(dev_priv) >= 12)
-               tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
-                                       level, encoder->type);
-       else if (INTEL_GEN(dev_priv) >= 11)
-               icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
-                                       level, encoder->type);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_ddi_vswing_sequence(encoder, level, encoder->type);
-       else
-               bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+       tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+                               level, encoder->type);
+}
 
-       return 0;
+static void
+icl_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       int level = intel_ddi_dp_level(intel_dp);
+
+       icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+                               level, encoder->type);
 }
 
-u32 ddi_signal_levels(struct intel_dp *intel_dp)
+static void
+cnl_set_signal_levels(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-       struct intel_encoder *encoder = &dport->base;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
 
+       cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+bxt_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       int level = intel_ddi_dp_level(intel_dp);
+
+       bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+hsw_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       int level = intel_ddi_dp_level(intel_dp);
+       enum port port = encoder->port;
+       u32 signal_levels;
+
+       signal_levels = DDI_BUF_TRANS_SELECT(level);
+
+       drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+                   signal_levels);
+
+       intel_dp->DP &= ~DDI_BUF_EMP_MASK;
+       intel_dp->DP |= signal_levels;
+
        if (IS_GEN9_BC(dev_priv))
                skl_ddi_set_iboost(encoder, level, encoder->type);
 
-       return DDI_BUF_TRANS_SELECT(level);
+       intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+       intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 }
 
-static inline
-u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
-                             enum phy phy)
+static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+                                    enum phy phy)
 {
        if (intel_phy_is_combo(dev_priv, phy)) {
                return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
@@ -2698,8 +2818,9 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
                if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
                        continue;
 
-               DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
-                        phy_name(phy));
+               drm_notice(&dev_priv->drm,
+                          "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+                          phy_name(phy));
                val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
                intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
        }
@@ -2936,11 +3057,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
                                        const struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
        if (!crtc_state->fec_enable)
                return;
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
-               DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to set FEC_READY in the sink\n");
 }
 
 static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2960,7 +3084,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 
        if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
                                  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
-               DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+               drm_err(&dev_priv->drm,
+                       "Timed out waiting for FEC Enable Status\n");
 }
 
 static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2980,7 +3105,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
        intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 }
 
-static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
@@ -3048,13 +3174,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
         * 7.a Configure Transcoder Clock Select to direct the Port clock to the
         * Transcoder.
         */
-       intel_ddi_enable_pipe_clock(crtc_state);
+       intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
        /*
         * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
         * Transport Select
         */
-       intel_ddi_config_transcoder_func(crtc_state);
+       intel_ddi_config_transcoder_func(encoder, crtc_state);
 
        /*
         * 7.c Configure & enable DP_TP_CTL with link training pattern 1
@@ -3120,7 +3246,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
        intel_dsc_enable(encoder, crtc_state);
 }
 
-static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
@@ -3185,21 +3312,22 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
        intel_ddi_enable_fec(encoder, crtc_state);
 
        if (!is_mst)
-               intel_ddi_enable_pipe_clock(crtc_state);
+               intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
        intel_dsc_enable(encoder, crtc_state);
 }
 
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+                                   struct intel_encoder *encoder,
                                    const struct intel_crtc_state *crtc_state,
                                    const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        if (INTEL_GEN(dev_priv) >= 12)
-               tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+               tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
        else
-               hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+               hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
 
        /* MST will call a setting of MSA after an allocating of Virtual Channel
         * from MST encoder pre_enable callback.
@@ -3211,7 +3339,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        }
 }
 
-static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
                                      const struct intel_crtc_state *crtc_state,
                                      const struct drm_connector_state *conn_state)
 {
@@ -3244,14 +3373,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        if (IS_GEN9_BC(dev_priv))
                skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
        intel_dig_port->set_infoframes(encoder,
                                       crtc_state->has_infoframe,
                                       crtc_state, conn_state);
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state,
                                 const struct drm_connector_state *conn_state)
 {
@@ -3280,12 +3410,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+               intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+                                         conn_state);
        } else {
                struct intel_lspcon *lspcon =
                                enc_to_intel_lspcon(encoder);
 
-               intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+               intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+                                       conn_state);
                if (lspcon->active) {
                        struct intel_digital_port *dig_port =
                                        enc_to_dig_port(encoder);
@@ -3328,7 +3460,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
                intel_wait_ddi_buf_idle(dev_priv, port);
 }
 
-static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
                                      const struct intel_crtc_state *old_crtc_state,
                                      const struct drm_connector_state *old_conn_state)
 {
@@ -3339,6 +3472,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
                                          INTEL_OUTPUT_DP_MST);
        enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
+       intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+
        /*
         * Power down sink before disabling the port, otherwise we end
         * up getting interrupts from the sink on detecting link loss.
@@ -3384,7 +3519,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        intel_ddi_clk_disable(encoder);
 }
 
-static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+                                       struct intel_encoder *encoder,
                                        const struct intel_crtc_state *old_crtc_state,
                                        const struct drm_connector_state *old_conn_state)
 {
@@ -3407,22 +3543,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
-               return;
-
-       DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
-                     transcoder_name(old_crtc_state->cpu_transcoder));
-
-       intel_de_write(dev_priv,
-                      TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *encoder,
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
                                   const struct intel_crtc_state *old_crtc_state,
                                   const struct drm_connector_state *old_conn_state)
 {
@@ -3436,9 +3558,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 
                intel_disable_pipe(old_crtc_state);
 
-               if (INTEL_GEN(dev_priv) >= 11)
-                       icl_disable_transcoder_port_sync(old_crtc_state);
-
                intel_ddi_disable_transcoder_func(old_crtc_state);
 
                intel_dsc_disable(old_crtc_state);
@@ -3463,11 +3582,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
         */
 
        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
-               intel_ddi_post_disable_hdmi(encoder,
-                                           old_crtc_state, old_conn_state);
+               intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+                                           old_conn_state);
        else
-               intel_ddi_post_disable_dp(encoder,
-                                         old_crtc_state, old_conn_state);
+               intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+                                         old_conn_state);
 
        if (INTEL_GEN(dev_priv) >= 11)
                icl_unmap_plls_to_ports(encoder);
@@ -3480,7 +3599,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
                intel_tc_port_put_link(dig_port);
 }
 
-void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
@@ -3514,7 +3634,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
        intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
 }
 
-static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+                                           struct intel_encoder *encoder,
+                                           const struct intel_crtc_state *crtc_state)
+{
+       const struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       if (!crtc_state->sync_mode_slaves_mask)
+               return;
+
+       for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+               struct intel_encoder *slave_encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+               struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+               const struct intel_crtc_state *slave_crtc_state;
+
+               if (!slave_crtc)
+                       continue;
+
+               slave_crtc_state =
+                       intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+               if (slave_crtc_state->master_transcoder !=
+                   crtc_state->cpu_transcoder)
+                       continue;
+
+               intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder));
+       }
+
+       usleep_range(200, 400);
+
+       intel_dp_stop_link_train(enc_to_intel_dp(encoder));
+}
+
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                const struct drm_connector_state *conn_state)
 {
@@ -3526,13 +3682,14 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
                intel_dp_stop_link_train(intel_dp);
 
        intel_edp_backlight_on(crtc_state, conn_state);
-       intel_psr_enable(intel_dp, crtc_state);
-       intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
-       intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
+       intel_psr_enable(intel_dp, crtc_state, conn_state);
+       intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
        intel_edp_drrs_enable(intel_dp, crtc_state);
 
        if (crtc_state->has_audio)
                intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+       trans_port_sync_stop_link_train(state, encoder, crtc_state);
 }
 
 static i915_reg_t
@@ -3555,7 +3712,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
        return CHICKEN_TRANS(trans[port]);
 }
 
-static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
@@ -3567,9 +3725,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
        if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
                                               crtc_state->hdmi_high_tmds_clock_ratio,
                                               crtc_state->hdmi_scrambling))
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
-                             "scrambling/TMDS bit clock ratio\n",
-                              connector->base.id, connector->name);
+               drm_dbg_kms(&dev_priv->drm,
+                           "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+                           connector->base.id, connector->name);
 
        /* Display WA #1143: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv)) {
@@ -3617,20 +3775,23 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
                intel_audio_codec_enable(encoder, crtc_state, conn_state);
 }
 
-static void intel_enable_ddi(struct intel_encoder *encoder,
+static void intel_enable_ddi(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
 {
-       WARN_ON(crtc_state->has_pch_encoder);
+       drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+       intel_ddi_enable_transcoder_func(encoder, crtc_state);
 
        intel_enable_pipe(crtc_state);
 
        intel_crtc_vblank_on(crtc_state);
 
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+               intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
        else
-               intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+               intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
 
        /* Enable hdcp if it's desired */
        if (conn_state->content_protection ==
@@ -3640,7 +3801,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
                                  (u8)conn_state->hdcp_content_type);
 }
 
-static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
 {
@@ -3660,10 +3822,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
                                              false);
 }
 
-static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
                                   const struct intel_crtc_state *old_crtc_state,
                                   const struct drm_connector_state *old_conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct drm_connector *connector = old_conn_state->connector;
 
        if (old_crtc_state->has_audio)
@@ -3672,23 +3836,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
 
        if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
                                               false, false))
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
-                             connector->base.id, connector->name);
+               drm_dbg_kms(&i915->drm,
+                           "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+                           connector->base.id, connector->name);
 }
 
-static void intel_disable_ddi(struct intel_encoder *encoder,
+static void intel_disable_ddi(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
        intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
 
        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
-               intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+               intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+                                      old_conn_state);
        else
-               intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
+               intel_disable_ddi_dp(state, encoder, old_crtc_state,
+                                    old_conn_state);
 }
 
-static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+                                    struct intel_encoder *encoder,
                                     const struct intel_crtc_state *crtc_state,
                                     const struct drm_connector_state *conn_state)
 {
@@ -3696,21 +3865,24 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
 
        intel_ddi_set_dp_msa(crtc_state, conn_state);
 
-       intel_psr_update(intel_dp, crtc_state);
+       intel_psr_update(intel_dp, crtc_state, conn_state);
+       intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
        intel_edp_drrs_enable(intel_dp, crtc_state);
 
-       intel_panel_update_backlight(encoder, crtc_state, conn_state);
+       intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
 }
 
-static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
 
        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+               intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+                                        conn_state);
 
-       intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
+       intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
 }
 
 static void
@@ -3722,7 +3894,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
                crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
        int required_lanes = crtc_state ? crtc_state->lane_count : 1;
 
-       WARN_ON(crtc && crtc->active);
+       drm_WARN_ON(state->base.dev, crtc && crtc->active);
 
        intel_tc_port_get_link(enc_to_dig_port(encoder),
                               required_lanes);
@@ -3739,7 +3911,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
 }
 
 static void
-intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+                        struct intel_encoder *encoder,
                         const struct intel_crtc_state *crtc_state,
                         const struct drm_connector_state *conn_state)
 {
@@ -3813,6 +3986,74 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
        udelay(600);
 }
 
+static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
+                                    u8 dp_train_pat)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+       enum port port = dp_to_dig_port(intel_dp)->base.port;
+       u32 temp;
+
+       temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+
+       if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+               temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+       else
+               temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+       temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       switch (dp_train_pat & train_pat_mask) {
+       case DP_TRAINING_PATTERN_DISABLE:
+               temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+               break;
+       case DP_TRAINING_PATTERN_1:
+               temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+               break;
+       case DP_TRAINING_PATTERN_2:
+               temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+               break;
+       case DP_TRAINING_PATTERN_3:
+               temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+               break;
+       case DP_TRAINING_PATTERN_4:
+               temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+               break;
+       }
+
+       intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+
+       intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+       intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+}
+
+static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
+{
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       u32 val;
+
+       val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+       intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+
+       /*
+        * Until TGL on PORT_A we can have only eDP in SST mode. There the only
+        * reason we need to set idle transmission mode is to work around a HW
+        * issue where we enable the pipe while not in idle link-training mode.
+        * In this case there is requirement to wait for a minimum number of
+        * idle patterns to be sent.
+        */
+       if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
+               return;
+
+       if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+                                 DP_TP_STATUS_IDLE_DONE, 1))
+               drm_err(&dev_priv->drm,
+                       "Timed out waiting for DP idle patterns\n");
+}
+
 static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
                                       enum transcoder cpu_transcoder)
 {
@@ -3839,6 +4080,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
                crtc_state->min_voltage_level = 2;
 }
 
+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+                                                    enum transcoder cpu_transcoder)
+{
+       u32 master_select;
+
+       if (INTEL_GEN(dev_priv) >= 11) {
+               u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+               if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+                       return INVALID_TRANSCODER;
+
+               master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+       } else {
+               u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+               if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+                       return INVALID_TRANSCODER;
+
+               master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+       }
+
+       if (master_select == 0)
+               return TRANSCODER_EDP;
+       else
+               return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+       u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+       enum transcoder cpu_transcoder;
+
+       crtc_state->master_transcoder =
+               bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+               enum intel_display_power_domain power_domain;
+               intel_wakeref_t trans_wakeref;
+
+               power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+               trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                                  power_domain);
+
+               if (!trans_wakeref)
+                       continue;
+
+               if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+                   crtc_state->cpu_transcoder)
+                       crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+               intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+       }
+
+       drm_WARN_ON(&dev_priv->drm,
+                   crtc_state->master_transcoder != INVALID_TRANSCODER &&
+                   crtc_state->sync_mode_slaves_mask);
+}
+
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -3930,11 +4231,15 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                        pipe_config->fec_enable =
                                intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
 
-                       DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
-                                     encoder->base.base.id, encoder->base.name,
-                                     pipe_config->fec_enable);
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "[ENCODER:%d:%s] Fec status: %u\n",
+                                   encoder->base.base.id, encoder->base.name,
+                                   pipe_config->fec_enable);
                }
 
+               pipe_config->infoframes.enable |=
+                       intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
                break;
        case TRANS_DDI_MODE_SELECT_DP_MST:
                pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3946,6 +4251,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                                        REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
 
                intel_dp_get_m_n(intel_crtc, pipe_config);
+
+               pipe_config->infoframes.enable |=
+                       intel_hdmi_infoframes_enabled(encoder, pipe_config);
                break;
        default:
                break;
@@ -3969,8 +4277,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
-               DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-                             pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+               drm_dbg_kms(&dev_priv->drm,
+                           "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+                           pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
                dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
        }
 
@@ -3996,6 +4305,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        intel_read_infoframe(encoder, pipe_config,
                             HDMI_INFOFRAME_TYPE_DRM,
                             &pipe_config->infoframes.drm);
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               bdw_get_trans_port_sync_config(pipe_config);
+
+       intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
+       intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
 }
 
 static enum intel_output_type
@@ -4025,7 +4340,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
        enum port port = encoder->port;
        int ret;
 
-       if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+       if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
                pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
        if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4097,7 +4412,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
        u8 transcoders = 0;
        int i;
 
-       if (INTEL_GEN(dev_priv) < 11)
+       /*
+        * We don't enable port sync on BDW due to missing w/as and
+        * due to not having adjusted the modeset sequence appropriately.
+        */
+       if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
        if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4129,12 +4448,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
                                         struct intel_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct drm_connector *connector = conn_state->connector;
        u8 port_sync_transcoders = 0;
 
-       DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
-                     encoder->base.base.id, encoder->base.name,
-                     crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+       drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]",
+                   encoder->base.base.id, encoder->base.name,
+                   crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
 
        if (connector->has_tile)
                port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
@@ -4187,6 +4507,20 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
        intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
        intel_dig_port->dp.prepare_link_retrain =
                intel_ddi_prepare_link_retrain;
+       intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
+       intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+
+       if (INTEL_GEN(dev_priv) >= 12)
+               intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+       else if (INTEL_GEN(dev_priv) >= 11)
+               intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+       else if (IS_CANNONLAKE(dev_priv))
+               intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+       else if (IS_GEN9_LP(dev_priv))
+               intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+       else
+               intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+
        if (INTEL_GEN(dev_priv) < 12) {
                intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
                intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
@@ -4278,7 +4612,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
 
        ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
        if (ret < 0) {
-               DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+               drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+                       ret);
                return 0;
        }
 
@@ -4302,15 +4637,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
 
 static enum intel_hotplug_state
 intel_ddi_hotplug(struct intel_encoder *encoder,
-                 struct intel_connector *connector,
-                 bool irq_received)
+                 struct intel_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+       bool is_tc = intel_phy_is_tc(i915, phy);
        struct drm_modeset_acquire_ctx ctx;
        enum intel_hotplug_state state;
        int ret;
 
-       state = intel_encoder_hotplug(encoder, connector, irq_received);
+       state = intel_encoder_hotplug(encoder, connector);
 
        drm_modeset_acquire_init(&ctx, 0);
 
@@ -4348,14 +4685,45 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
         * valid EDID. To solve this schedule another detection cycle if this
         * time around we didn't detect any change in the sink's connection
         * status.
+        *
+        * Type-c connectors which get their HPD signal deasserted then
+        * reasserted, without unplugging/replugging the sink from the
+        * connector, introduce a delay until the AUX channel communication
+        * becomes functional. Retry the detection for 5 seconds on type-c
+        * connectors to account for this delay.
         */
-       if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+       if (state == INTEL_HOTPLUG_UNCHANGED &&
+           connector->hotplug_retries < (is_tc ? 5 : 1) &&
            !dig_port->dp.is_mst)
                state = INTEL_HOTPLUG_RETRY;
 
        return state;
 }
 
+static bool lpt_digital_port_connected(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+
+       return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool hsw_digital_port_connected(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+       return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static bool bdw_digital_port_connected(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+       return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+}
+
 static struct intel_connector *
 intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
 {
@@ -4424,7 +4792,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
         * so we use the proper lane count for our calculations.
         */
        if (intel_ddi_a_force_4_lanes(intel_dport)) {
-               DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Forcing DDI_A_4_LANES for port A\n");
                intel_dport->saved_port_bits |= DDI_A_4_LANES;
                max_lanes = 4;
        }
@@ -4452,12 +4821,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                init_dp = true;
                init_lspcon = true;
                init_hdmi = false;
-               DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+               drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+                           port_name(port));
        }
 
        if (!init_dp && !init_hdmi) {
-               DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
-                             port_name(port));
+               drm_dbg_kms(&dev_priv->drm,
+                           "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+                           port_name(port));
                return;
        }
 
@@ -4536,17 +4907,36 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        if (init_lspcon) {
                if (lspcon_init(intel_dig_port))
                        /* TODO: handle hdmi info frame part */
-                       DRM_DEBUG_KMS("LSPCON init success on port %c\n",
-                               port_name(port));
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "LSPCON init success on port %c\n",
+                                   port_name(port));
                else
                        /*
                         * LSPCON init faied, but DP init was success, so
                         * lets try to drive as DP++ port.
                         */
-                       DRM_ERROR("LSPCON init failed on port %c\n",
+                       drm_err(&dev_priv->drm,
+                               "LSPCON init failed on port %c\n",
                                port_name(port));
        }
 
+       if (INTEL_GEN(dev_priv) >= 11) {
+               if (intel_phy_is_tc(dev_priv, phy))
+                       intel_dig_port->connected = intel_tc_port_connected;
+               else
+                       intel_dig_port->connected = lpt_digital_port_connected;
+       } else if (INTEL_GEN(dev_priv) >= 8) {
+               if (port == PORT_A || IS_GEN9_LP(dev_priv))
+                       intel_dig_port->connected = bdw_digital_port_connected;
+               else
+                       intel_dig_port->connected = lpt_digital_port_connected;
+       } else {
+               if (port == PORT_A)
+                       intel_dig_port->connected = hsw_digital_port_connected;
+               else
+                       intel_dig_port->connected = lpt_digital_port_connected;
+       }
+
        intel_infoframe_init(intel_dig_port);
 
        return;
index 55fd72b901fe4c79ae760461449e412477784e27..fbdf8ddde486d85e9ea5977282066c1bfb7348c0 100644 (file)
@@ -17,16 +17,19 @@ struct intel_dp;
 struct intel_dpll_hw_state;
 struct intel_encoder;
 
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+                               struct intel_encoder *intel_encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state);
 void hsw_fdi_link_train(struct intel_encoder *encoder,
                        const struct intel_crtc_state *crtc_state);
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
                          const struct drm_connector_state *conn_state);
index 346846609f45c9aa522be701d3aaddfb3484d10d..9ea1a397d1b54e813822a42eb5cf05a85b7fe876 100644 (file)
@@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
                dev_priv->czclk_freq);
 }
 
-static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
-                   const struct intel_crtc_state *pipe_config)
+/* units of 100MHz */
+static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+                              const struct intel_crtc_state *pipe_config)
 {
        if (HAS_DDI(dev_priv))
                return pipe_config->port_clock; /* SPLL */
@@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
                               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
 }
 
-/* Wa_2006604312:icl */
+/* Wa_2006604312:icl,ehl */
 static void
 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
                       bool enable)
@@ -544,17 +544,23 @@ needs_modeset(const struct intel_crtc_state *state)
        return drm_atomic_crtc_needs_modeset(&state->uapi);
 }
 
-bool
-is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
 {
-       return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
-               crtc_state->sync_mode_slaves_mask);
+       return crtc_state->master_transcoder != INVALID_TRANSCODER;
 }
 
 static bool
-is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
 {
-       return crtc_state->master_transcoder != INVALID_TRANSCODER;
+       return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+       return is_trans_port_sync_master(crtc_state) ||
+               is_trans_port_sync_slave(crtc_state);
 }
 
 /*
@@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
        return clock->dot / 5;
 }
 
-#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
-
 /*
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
  */
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
 {
-       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-               INTELPllInvalid("n out of range\n");
-       if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
-               INTELPllInvalid("p1 out of range\n");
-       if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
-               INTELPllInvalid("m2 out of range\n");
-       if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
-               INTELPllInvalid("m1 out of range\n");
+       if (clock->n < limit->n.min || limit->n.max < clock->n)
+               return false;
+       if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+               return false;
+       if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+               return false;
+       if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+               return false;
 
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
-                       INTELPllInvalid("m1 <= m2\n");
+                       return false;
 
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
-                       INTELPllInvalid("p out of range\n");
+                       return false;
                if (clock->m < limit->m.min || limit->m.max < clock->m)
-                       INTELPllInvalid("m out of range\n");
+                       return false;
        }
 
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
-               INTELPllInvalid("vco out of range\n");
+               return false;
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
-               INTELPllInvalid("dot out of range\n");
+               return false;
 
        return true;
 }
@@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
                                        int this_err;
 
                                        i9xx_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                       if (!intel_pll_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
@@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
                                        int this_err;
 
                                        pnv_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                       if (!intel_pll_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
@@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
                                        int this_err;
 
                                        i9xx_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                       if (!intel_pll_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
@@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
 
                                        vlv_calc_dpll_params(refclk, &clock);
 
-                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                       if (!intel_pll_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
@@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
 
                        chv_calc_dpll_params(refclk, &clock);
 
-                       if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+                       if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
                                continue;
 
                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1969,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
 
 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
 {
-       WARN_ON(!is_ccs_modifier(fb->modifier) ||
-               (main_plane && main_plane >= fb->format->num_planes / 2));
+       drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+                   (main_plane && main_plane >= fb->format->num_planes / 2));
 
        return fb->format->num_planes / 2 + main_plane;
 }
 
 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
 {
-       WARN_ON(!is_ccs_modifier(fb->modifier) ||
-               ccs_plane < fb->format->num_planes / 2);
+       drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+                   ccs_plane < fb->format->num_planes / 2);
 
        return ccs_plane - fb->format->num_planes / 2;
 }
@@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
 static int
 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
 {
+       struct drm_i915_private *i915 = to_i915(fb->dev);
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        int main_plane;
        int hsub, vsub;
@@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
         * x/y offsets must match between CCS and the main surface.
         */
        if (main_x != ccs_x || main_y != ccs_y) {
-               DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+               drm_dbg_kms(&i915->drm,
+                             "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                              main_x, main_y,
                              ccs_x, ccs_y,
                              intel_fb->normal[main_plane].x,
@@ -2986,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
            fb->modifier != I915_FORMAT_MOD_Yf_TILED)
                return 0;
 
-       if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+       if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
                return 0;
 
        rot_info->plane[plane] = *plane_info;
@@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
                return DRM_FORMAT_RGB565;
        case PLANE_CTL_FORMAT_NV12:
                return DRM_FORMAT_NV12;
+       case PLANE_CTL_FORMAT_XYUV:
+               return DRM_FORMAT_XYUV8888;
        case PLANE_CTL_FORMAT_P010:
                return DRM_FORMAT_P010;
        case PLANE_CTL_FORMAT_P012:
@@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
        case DRM_FORMAT_XRGB16161616F:
        case DRM_FORMAT_ARGB16161616F:
                return PLANE_CTL_FORMAT_XRGB_16161616F;
+       case DRM_FORMAT_XYUV8888:
+               return PLANE_CTL_FORMAT_XYUV;
        case DRM_FORMAT_YUYV:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
        case DRM_FORMAT_YVYU:
@@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
        intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }
 
-static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 trans_ddi_func_ctl2_val;
-       u8 master_select;
-
-       /*
-        * Configure the master select and enable Transcoder Port Sync for
-        * Slave CRTCs transcoder.
-        */
-       if (crtc_state->master_transcoder == INVALID_TRANSCODER)
-               return;
-
-       if (crtc_state->master_transcoder == TRANSCODER_EDP)
-               master_select = 0;
-       else
-               master_select = crtc_state->master_transcoder + 1;
-
-       /* Set the master select bits for Tranascoder Port Sync */
-       trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
-                                  PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
-               PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
-       /* Enable Transcoder Port Sync */
-       trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
-
-       intel_de_write(dev_priv,
-                      TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
-                      trans_ddi_func_ctl2_val);
-}
-
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
@@ -6110,30 +6089,26 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
        return 0;
 }
 
-/**
- * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
- *
- * @state: crtc's scaler state
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int skl_update_scaler_crtc(struct intel_crtc_state *state)
+static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
 {
-       const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
-       bool need_scaler = false;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+       int width, height;
 
-       if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-           state->pch_pfit.enabled)
-               need_scaler = true;
+       if (crtc_state->pch_pfit.enabled) {
+               width = drm_rect_width(&crtc_state->pch_pfit.dst);
+               height = drm_rect_height(&crtc_state->pch_pfit.dst);
+       } else {
+               width = adjusted_mode->crtc_hdisplay;
+               height = adjusted_mode->crtc_vdisplay;
+       }
 
-       return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
-                                &state->scaler_state.scaler_id,
-                                state->pipe_src_w, state->pipe_src_h,
-                                adjusted_mode->crtc_hdisplay,
-                                adjusted_mode->crtc_vdisplay, NULL, 0,
-                                need_scaler);
+       return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+                                SKL_CRTC_INDEX,
+                                &crtc_state->scaler_state.scaler_id,
+                                crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+                                width, height, NULL, 0,
+                                crtc_state->pch_pfit.enabled);
 }
 
 /**
@@ -6200,6 +6175,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
+       case DRM_FORMAT_XYUV8888:
        case DRM_FORMAT_P010:
        case DRM_FORMAT_P012:
        case DRM_FORMAT_P016:
@@ -6241,70 +6217,80 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
+       struct drm_rect src = {
+               .x2 = crtc_state->pipe_src_w << 16,
+               .y2 = crtc_state->pipe_src_h << 16,
+       };
+       const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+       u16 uv_rgb_hphase, uv_rgb_vphase;
+       enum pipe pipe = crtc->pipe;
+       int width = drm_rect_width(dst);
+       int height = drm_rect_height(dst);
+       int x = dst->x1;
+       int y = dst->y1;
+       int hscale, vscale;
+       unsigned long irqflags;
+       int id;
 
-       if (crtc_state->pch_pfit.enabled) {
-               u16 uv_rgb_hphase, uv_rgb_vphase;
-               int pfit_w, pfit_h, hscale, vscale;
-               unsigned long irqflags;
-               int id;
-
-               if (drm_WARN_ON(&dev_priv->drm,
-                               crtc_state->scaler_state.scaler_id < 0))
-                       return;
+       if (!crtc_state->pch_pfit.enabled)
+               return;
 
-               pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
-               pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+       if (drm_WARN_ON(&dev_priv->drm,
+                       crtc_state->scaler_state.scaler_id < 0))
+               return;
 
-               hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
-               vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+       hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+       vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
 
-               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
-               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+       uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+       uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
-               id = scaler_state->scaler_id;
+       id = scaler_state->scaler_id;
 
-               spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-               intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
-                                 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
-               intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
-                                 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-               intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
-                                 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-               intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
-                                 crtc_state->pch_pfit.pos);
-               intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
-                                 crtc_state->pch_pfit.size);
+       intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+                         PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+       intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+                         PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+       intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+                         PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+       intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+                         x << 16 | y);
+       intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+                         width << 16 | height);
 
-               spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-       }
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
        enum pipe pipe = crtc->pipe;
+       int width = drm_rect_width(dst);
+       int height = drm_rect_height(dst);
+       int x = dst->x1;
+       int y = dst->y1;
 
-       if (crtc_state->pch_pfit.enabled) {
-               /* Force use of hard-coded filter coefficients
-                * as some pre-programmed values are broken,
-                * e.g. x201.
-                */
-               if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
-                       intel_de_write(dev_priv, PF_CTL(pipe),
-                                      PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
-               else
-                       intel_de_write(dev_priv, PF_CTL(pipe),
-                                      PF_ENABLE | PF_FILTER_MED_3x3);
-               intel_de_write(dev_priv, PF_WIN_POS(pipe),
-                              crtc_state->pch_pfit.pos);
-               intel_de_write(dev_priv, PF_WIN_SZ(pipe),
-                              crtc_state->pch_pfit.size);
-       }
+       if (!crtc_state->pch_pfit.enabled)
+               return;
+
+       /* Force use of hard-coded filter coefficients
+        * as some pre-programmed values are broken,
+        * e.g. x201.
+        */
+       if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+               intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+                              PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+       else
+               intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+                              PF_FILTER_MED_3x3);
+       intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+       intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
 }
 
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
@@ -6463,8 +6449,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
-       /* Wa_2006604312:icl */
-       if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+       /* Wa_2006604312:icl,ehl */
+       if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
                return true;
 
        return false;
@@ -6534,7 +6520,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
            needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, true);
 
-       /* Wa_2006604312:icl */
+       /* Wa_2006604312:icl,ehl */
        if (!needs_scalerclk_wa(old_crtc_state) &&
            needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, true);
@@ -6646,7 +6632,7 @@ intel_connector_primary_encoder(struct intel_connector *connector)
                return &dp_to_dig_port(connector->mst_port)->base;
 
        encoder = intel_attached_encoder(connector);
-       WARN_ON(!encoder);
+       drm_WARN_ON(connector->base.dev, !encoder);
 
        return encoder;
 }
@@ -6720,7 +6706,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+                       encoder->pre_pll_enable(state, encoder,
+                                               crtc_state, conn_state);
        }
 }
 
@@ -6741,7 +6728,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->pre_enable)
-                       encoder->pre_enable(encoder, crtc_state, conn_state);
+                       encoder->pre_enable(state, encoder,
+                                           crtc_state, conn_state);
        }
 }
 
@@ -6762,7 +6750,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->enable)
-                       encoder->enable(encoder, crtc_state, conn_state);
+                       encoder->enable(state, encoder,
+                                       crtc_state, conn_state);
                intel_opregion_notify_encoder(encoder, true);
        }
 }
@@ -6785,7 +6774,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state,
 
                intel_opregion_notify_encoder(encoder, false);
                if (encoder->disable)
-                       encoder->disable(encoder, old_crtc_state, old_conn_state);
+                       encoder->disable(state, encoder,
+                                        old_crtc_state, old_conn_state);
        }
 }
 
@@ -6806,7 +6796,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->post_disable)
-                       encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+                       encoder->post_disable(state, encoder,
+                                             old_crtc_state, old_conn_state);
        }
 }
 
@@ -6827,7 +6818,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->post_pll_disable)
-                       encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+                       encoder->post_pll_disable(state, encoder,
+                                                 old_crtc_state, old_conn_state);
        }
 }
 
@@ -6848,7 +6840,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
                        continue;
 
                if (encoder->update_pipe)
-                       encoder->update_pipe(encoder, crtc_state, conn_state);
+                       encoder->update_pipe(state, encoder,
+                                            crtc_state, conn_state);
        }
 }
 
@@ -7037,9 +7030,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(new_crtc_state);
 
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_enable_trans_port_sync(new_crtc_state);
-
        intel_set_pipe_src_size(new_crtc_state);
 
        if (cpu_transcoder != TRANSCODER_EDP &&
@@ -7087,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);
 
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_transcoder_func(new_crtc_state);
-
        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
 
@@ -7120,11 +7107,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
-       if (old_crtc_state->pch_pfit.enabled) {
-               intel_de_write(dev_priv, PF_CTL(pipe), 0);
-               intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
-               intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
-       }
+       if (!old_crtc_state->pch_pfit.enabled)
+               return;
+
+       intel_de_write(dev_priv, PF_CTL(pipe), 0);
+       intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+       intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
 }
 
 static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -7312,7 +7300,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
                }
        }
 
-       switch (dig_port->aux_ch) {
+       return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+}
+
+/*
+ * Converts aux_ch to power_domain without caring about TBT ports for that use
+ * intel_aux_power_domain()
+ */
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
+{
+       switch (aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
@@ -7328,7 +7326,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
        default:
-               MISSING_CASE(dig_port->aux_ch);
+               MISSING_CASE(aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
 }
@@ -7942,39 +7940,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
                (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
 }
 
-static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
 {
-       u32 pixel_rate;
-
-       pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
+       u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
+       unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
 
        /*
         * We only use IF-ID interlacing. If we ever use
         * PF-ID we'll need to adjust the pixel_rate here.
         */
 
-       if (pipe_config->pch_pfit.enabled) {
-               u64 pipe_w, pipe_h, pfit_w, pfit_h;
-               u32 pfit_size = pipe_config->pch_pfit.size;
+       if (!crtc_state->pch_pfit.enabled)
+               return pixel_rate;
 
-               pipe_w = pipe_config->pipe_src_w;
-               pipe_h = pipe_config->pipe_src_h;
+       pipe_w = crtc_state->pipe_src_w;
+       pipe_h = crtc_state->pipe_src_h;
 
-               pfit_w = (pfit_size >> 16) & 0xFFFF;
-               pfit_h = pfit_size & 0xFFFF;
-               if (pipe_w < pfit_w)
-                       pipe_w = pfit_w;
-               if (pipe_h < pfit_h)
-                       pipe_h = pfit_h;
+       pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
+       pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
 
-               if (WARN_ON(!pfit_w || !pfit_h))
-                       return pixel_rate;
+       if (pipe_w < pfit_w)
+               pipe_w = pfit_w;
+       if (pipe_h < pfit_h)
+               pipe_h = pfit_h;
 
-               pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
-                                    pfit_w * pfit_h);
-       }
+       if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
+                       !pfit_w || !pfit_h))
+               return pixel_rate;
 
-       return pixel_rate;
+       return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
+                      pfit_w * pfit_h);
 }
 
 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
@@ -8143,7 +8138,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
        }
 }
 
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
        if (i915_modparams.panel_use_ssc >= 0)
                return i915_modparams.panel_use_ssc != 0;
@@ -8891,7 +8886,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 
        mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
 
-       mode->hsync = drm_mode_hsync(mode);
        mode->vrefresh = drm_mode_vrefresh(mode);
        drm_mode_set_name(mode);
 }
@@ -9168,9 +9162,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
                IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
 }
 
-static void i9xx_get_pfit_config(struct intel_crtc *crtc,
-                                struct intel_crtc_state *pipe_config)
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 tmp;
 
@@ -9190,9 +9184,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                        return;
        }
 
-       pipe_config->gmch_pfit.control = tmp;
-       pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
-                                                         PFIT_PGM_RATIOS);
+       crtc_state->gmch_pfit.control = tmp;
+       crtc_state->gmch_pfit.pgm_ratios =
+               intel_de_read(dev_priv, PFIT_PGM_RATIOS);
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9398,7 +9392,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
-       pipe_config->master_transcoder = INVALID_TRANSCODER;
 
        ret = false;
 
@@ -9443,7 +9436,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);
 
-       i9xx_get_pfit_config(crtc, pipe_config);
+       i9xx_get_pfit_config(pipe_config);
 
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
@@ -10413,37 +10406,47 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
                                     &pipe_config->fdi_m_n, NULL);
 }
 
-static void skl_get_pfit_config(struct intel_crtc *crtc,
-                               struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
+                                 u32 pos, u32 size)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
-       u32 ps_ctrl = 0;
+       drm_rect_init(&crtc_state->pch_pfit.dst,
+                     pos >> 16, pos & 0xffff,
+                     size >> 16, size & 0xffff);
+}
+
+static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
        int id = -1;
        int i;
 
        /* find scaler attached to this pipe */
        for (i = 0; i < crtc->num_scalers; i++) {
-               ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
-               if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
-                       id = i;
-                       pipe_config->pch_pfit.enabled = true;
-                       pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
-                                                                 SKL_PS_WIN_POS(crtc->pipe, i));
-                       pipe_config->pch_pfit.size = intel_de_read(dev_priv,
-                                                                  SKL_PS_WIN_SZ(crtc->pipe, i));
-                       scaler_state->scalers[i].in_use = true;
-                       break;
-               }
+               u32 ctl, pos, size;
+
+               ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+               if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
+                       continue;
+
+               id = i;
+               crtc_state->pch_pfit.enabled = true;
+
+               pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+               size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+               ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+               scaler_state->scalers[i].in_use = true;
+               break;
        }
 
        scaler_state->scaler_id = id;
-       if (id >= 0) {
+       if (id >= 0)
                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
-       } else {
+       else
                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
-       }
 }
 
 static void
@@ -10579,30 +10582,30 @@ error:
        kfree(intel_fb);
 }
 
-static void ilk_get_pfit_config(struct intel_crtc *crtc,
-                               struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 tmp;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 ctl, pos, size;
 
-       tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
-
-       if (tmp & PF_ENABLE) {
-               pipe_config->pch_pfit.enabled = true;
-               pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
-                                                         PF_WIN_POS(crtc->pipe));
-               pipe_config->pch_pfit.size = intel_de_read(dev_priv,
-                                                          PF_WIN_SZ(crtc->pipe));
-
-               /* We currently do not free assignements of panel fitters on
-                * ivb/hsw (since we don't use the higher upscaling modes which
-                * differentiates them) so just WARN about this case for now. */
-               if (IS_GEN(dev_priv, 7)) {
-                       drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
-                                   PF_PIPE_SEL_IVB(crtc->pipe));
-               }
-       }
+       ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+       if ((ctl & PF_ENABLE) == 0)
+               return;
+
+       crtc_state->pch_pfit.enabled = true;
+
+       pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
+       size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
+
+       ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+       /*
+        * We currently do not free assignments of panel fitters on
+        * ivb/hsw (since we don't use the higher upscaling modes which
+        * differentiate them) so just WARN about this case for now.
+        */
+       drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
+                   (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
 }
 
 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -10622,7 +10625,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
-       pipe_config->master_transcoder = INVALID_TRANSCODER;
 
        ret = false;
        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
@@ -10714,7 +10716,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);
 
-       ilk_get_pfit_config(crtc, pipe_config);
+       ilk_get_pfit_config(pipe_config);
 
        ret = true;
 
@@ -10891,7 +10893,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
 
-       if (HAS_TRANSCODER_EDP(dev_priv))
+       if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);
 
        /*
@@ -11085,61 +11087,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
        }
 }
 
-static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
-                                                enum transcoder cpu_transcoder)
-{
-       u32 trans_port_sync, master_select;
-
-       trans_port_sync = intel_de_read(dev_priv,
-                                       TRANS_DDI_FUNC_CTL2(cpu_transcoder));
-
-       if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
-               return INVALID_TRANSCODER;
-
-       master_select = trans_port_sync &
-                       PORT_SYNC_MODE_MASTER_SELECT_MASK;
-       if (master_select == 0)
-               return TRANSCODER_EDP;
-       else
-               return master_select - 1;
-}
-
-static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-       u32 transcoders;
-       enum transcoder cpu_transcoder;
-
-       crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
-                                                                 crtc_state->cpu_transcoder);
-
-       transcoders = BIT(TRANSCODER_A) |
-               BIT(TRANSCODER_B) |
-               BIT(TRANSCODER_C) |
-               BIT(TRANSCODER_D);
-       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
-               enum intel_display_power_domain power_domain;
-               intel_wakeref_t trans_wakeref;
-
-               power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-               trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                                  power_domain);
-
-               if (!trans_wakeref)
-                       continue;
-
-               if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
-                   crtc_state->cpu_transcoder)
-                       crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
-
-               intel_display_power_put(dev_priv, power_domain, trans_wakeref);
-       }
-
-       drm_WARN_ON(&dev_priv->drm,
-                   crtc_state->master_transcoder != INVALID_TRANSCODER &&
-                   crtc_state->sync_mode_slaves_mask);
-}
-
 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
 {
@@ -11243,9 +11190,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                power_domain_mask |= BIT_ULL(power_domain);
 
                if (INTEL_GEN(dev_priv) >= 9)
-                       skl_get_pfit_config(crtc, pipe_config);
+                       skl_get_pfit_config(pipe_config);
                else
-                       ilk_get_pfit_config(crtc, pipe_config);
+                       ilk_get_pfit_config(pipe_config);
        }
 
        if (hsw_crtc_supports_ips(crtc)) {
@@ -11271,10 +11218,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                pipe_config->pixel_multiplier = 1;
        }
 
-       if (INTEL_GEN(dev_priv) >= 11 &&
-           !transcoder_is_dsi(pipe_config->cpu_transcoder))
-               icl_get_trans_port_sync_config(pipe_config);
-
 out:
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
@@ -12377,10 +12320,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
-               plane_state->uapi.visible = visible = false;
-               crtc_state->active_planes &= ~BIT(plane->id);
-               crtc_state->data_rate[plane->id] = 0;
-               crtc_state->min_cdclk[plane->id] = 0;
+               intel_plane_set_invisible(crtc_state, plane_state);
+               visible = false;
        }
 
        if (!was_visible && !visible)
@@ -12510,8 +12451,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
                if (IS_ERR(linked_plane_state))
                        return PTR_ERR(linked_plane_state);
 
-               WARN_ON(linked_plane_state->planar_linked_plane != plane);
-               WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
+               drm_WARN_ON(state->base.dev,
+                           linked_plane_state->planar_linked_plane != plane);
+               drm_WARN_ON(state->base.dev,
+                           linked_plane_state->planar_slave == plane_state->planar_slave);
        }
 
        return 0;
@@ -12886,19 +12829,20 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
        return 0;
 }
 
-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+                                   const struct drm_display_mode *mode)
 {
-       DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
-                     "type: 0x%x flags: 0x%x\n",
-                     mode->crtc_clock,
-                     mode->crtc_hdisplay, mode->crtc_hsync_start,
-                     mode->crtc_hsync_end, mode->crtc_htotal,
-                     mode->crtc_vdisplay, mode->crtc_vsync_start,
-                     mode->crtc_vsync_end, mode->crtc_vtotal,
-                     mode->type, mode->flags);
+       drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+                   "type: 0x%x flags: 0x%x\n",
+                   mode->crtc_clock,
+                   mode->crtc_hdisplay, mode->crtc_hsync_start,
+                   mode->crtc_hsync_end, mode->crtc_htotal,
+                   mode->crtc_vdisplay, mode->crtc_vsync_start,
+                   mode->crtc_vsync_end, mode->crtc_vtotal,
+                   mode->type, mode->flags);
 }
 
-static inline void
+static void
 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
@@ -12922,6 +12866,16 @@ intel_dump_infoframe(struct drm_i915_private *dev_priv,
        hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
 }
 
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
+                     const struct drm_dp_vsc_sdp *vsc)
+{
+       if (!drm_debug_enabled(DRM_UT_KMS))
+               return;
+
+       drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
+}
+
 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
 
 static const char * const output_type_str[] = {
@@ -13042,6 +12996,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
                    transcoder_name(pipe_config->cpu_transcoder),
                    pipe_config->pipe_bpp, pipe_config->dither);
 
+       drm_dbg_kms(&dev_priv->drm,
+                   "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+                   transcoder_name(pipe_config->master_transcoder),
+                   pipe_config->sync_mode_slaves_mask);
+
        if (pipe_config->has_pch_encoder)
                intel_dump_m_n_config(pipe_config, "fdi",
                                      pipe_config->fdi_lanes,
@@ -13074,12 +13033,21 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(DP_SDP_VSC))
+               intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
 
        drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.mode);
        drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
-       intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
+       intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
        drm_dbg_kms(&dev_priv->drm,
                    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                    pipe_config->port_clock,
@@ -13104,9 +13072,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
                            pipe_config->gmch_pfit.lvds_border_bits);
        else
                drm_dbg_kms(&dev_priv->drm,
-                           "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
-                           pipe_config->pch_pfit.pos,
-                           pipe_config->pch_pfit.size,
+                           "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+                           DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
                            enableddisabled(pipe_config->pch_pfit.enabled),
                            yesno(pipe_config->pch_pfit.force_thru));
 
@@ -13228,7 +13195,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
 {
        crtc_state->uapi.enable = crtc_state->hw.enable;
        crtc_state->uapi.active = crtc_state->hw.active;
-       WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+       drm_WARN_ON(crtc_state->uapi.crtc->dev,
+                   drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
 
        crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
 
@@ -13521,6 +13489,13 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
        return memcmp(a, b, sizeof(*a)) == 0;
 }
 
+static bool
+intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
+                        const struct drm_dp_vsc_sdp *b)
+{
+       return memcmp(a, b, sizeof(*a)) == 0;
+}
+
 static void
 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
                               bool fastset, const char *name,
@@ -13546,6 +13521,31 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
        }
 }
 
+static void
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+                               bool fastset, const char *name,
+                               const struct drm_dp_vsc_sdp *a,
+                               const struct drm_dp_vsc_sdp *b)
+{
+       if (fastset) {
+               if (!drm_debug_enabled(DRM_UT_KMS))
+                       return;
+
+               drm_dbg_kms(&dev_priv->drm,
+                           "fastset mismatch in %s dp sdp\n", name);
+               drm_dbg_kms(&dev_priv->drm, "expected:\n");
+               drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
+               drm_dbg_kms(&dev_priv->drm, "found:\n");
+               drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+       } else {
+               drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
+               drm_err(&dev_priv->drm, "expected:\n");
+               drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
+               drm_err(&dev_priv->drm, "found:\n");
+               drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+       }
+}
+
 static void __printf(4, 5)
 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
                     const char *name, const char *format, ...)
@@ -13747,6 +13747,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
        } \
 } while (0)
 
+#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
+       if (!current_config->has_psr && !pipe_config->has_psr && \
+           !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+                                     &pipe_config->infoframes.name)) { \
+               pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+                                               &current_config->infoframes.name, \
+                                               &pipe_config->infoframes.name); \
+               ret = false; \
+       } \
+} while (0)
+
 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
        if (current_config->name1 != pipe_config->name1) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name1), \
@@ -13847,8 +13858,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                if (current_config->pch_pfit.enabled) {
-                       PIPE_CONF_CHECK_X(pch_pfit.pos);
-                       PIPE_CONF_CHECK_X(pch_pfit.size);
+                       PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
+                       PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
+                       PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
+                       PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
                }
 
                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
@@ -13922,6 +13935,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
        PIPE_CONF_CHECK_INFOFRAME(spd);
        PIPE_CONF_CHECK_INFOFRAME(hdmi);
        PIPE_CONF_CHECK_INFOFRAME(drm);
+       PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
 
        PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
        PIPE_CONF_CHECK_I(master_transcoder);
@@ -14010,7 +14024,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
-                                               &sw_plane_wm->wm[level]))
+                                               &sw_plane_wm->wm[level]) ||
+                           (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+                                                              &sw_plane_wm->sagv_wm0)))
                                continue;
 
                        drm_err(&dev_priv->drm,
@@ -14065,7 +14081,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
-                                               &sw_plane_wm->wm[level]))
+                                               &sw_plane_wm->wm[level]) ||
+                           (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+                                                              &sw_plane_wm->sagv_wm0)))
                                continue;
 
                        drm_err(&dev_priv->drm,
@@ -14999,11 +15017,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 }
 
 static void commit_pipe_config(struct intel_atomic_state *state,
-                              struct intel_crtc_state *old_crtc_state,
-                              struct intel_crtc_state *new_crtc_state)
+                              struct intel_crtc *crtc)
 {
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = needs_modeset(new_crtc_state);
 
        /*
@@ -15029,22 +15049,35 @@ static void commit_pipe_config(struct intel_atomic_state *state,
                dev_priv->display.atomic_update_watermarks(state, crtc);
 }
 
-static void intel_update_crtc(struct intel_crtc *crtc,
-                             struct intel_atomic_state *state,
-                             struct intel_crtc_state *old_crtc_state,
-                             struct intel_crtc_state *new_crtc_state)
+static void intel_enable_crtc(struct intel_atomic_state *state,
+                             struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       bool modeset = needs_modeset(new_crtc_state);
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
 
-       if (modeset) {
-               intel_crtc_update_active_timings(new_crtc_state);
+       if (!needs_modeset(new_crtc_state))
+               return;
 
-               dev_priv->display.crtc_enable(state, crtc);
+       intel_crtc_update_active_timings(new_crtc_state);
 
-               /* vblanks work again, re-enable pipe CRC. */
-               intel_crtc_enable_pipe_crc(crtc);
-       } else {
+       dev_priv->display.crtc_enable(state, crtc);
+
+       /* vblanks work again, re-enable pipe CRC. */
+       intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+                             struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       bool modeset = needs_modeset(new_crtc_state);
+
+       if (!modeset) {
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
@@ -15064,7 +15097,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);
 
-       commit_pipe_config(state, old_crtc_state, new_crtc_state);
+       commit_pipe_config(state, crtc);
 
        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
@@ -15084,18 +15117,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
 }
 
-static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
-       enum transcoder slave_transcoder;
-
-       drm_WARN_ON(&dev_priv->drm,
-                   !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
-
-       slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
-       return intel_get_crtc_for_pipe(dev_priv,
-                                      (enum pipe)slave_transcoder);
-}
 
 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
@@ -15171,129 +15192,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
 
 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
 {
+       struct intel_crtc_state *new_crtc_state;
        struct intel_crtc *crtc;
-       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
 
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+       for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (!new_crtc_state->hw.active)
                        continue;
 
-               intel_update_crtc(crtc, state, old_crtc_state,
-                                 new_crtc_state);
+               intel_enable_crtc(state, crtc);
+               intel_update_crtc(state, crtc);
        }
 }
 
-static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
-                                             struct intel_atomic_state *state,
-                                             struct intel_crtc_state *new_crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
-       intel_crtc_update_active_timings(new_crtc_state);
-       dev_priv->display.crtc_enable(state, crtc);
-       intel_crtc_enable_pipe_crc(crtc);
-}
-
-static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
-                                      struct intel_atomic_state *state)
-{
-       struct drm_connector *uninitialized_var(conn);
-       struct drm_connector_state *conn_state;
-       struct intel_dp *intel_dp;
-       int i;
-
-       for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
-               if (conn_state->crtc == &crtc->base)
-                       break;
-       }
-       intel_dp = intel_attached_dp(to_intel_connector(conn));
-       intel_dp_stop_link_train(intel_dp);
-}
-
-/*
- * TODO: This is only called from port sync and it is identical to what will be
- * executed again in intel_update_crtc() over port sync pipes
- */
-static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
-                                          struct intel_atomic_state *state)
-{
-       struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-       bool modeset = needs_modeset(new_crtc_state);
-
-       if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
-               intel_fbc_disable(crtc);
-       else
-               intel_fbc_enable(state, crtc);
-
-       /* Perform vblank evasion around commit operation */
-       intel_pipe_update_start(new_crtc_state);
-       commit_pipe_config(state, old_crtc_state, new_crtc_state);
-       skl_update_planes_on_crtc(state, crtc);
-       intel_pipe_update_end(new_crtc_state);
-
-       /*
-        * We usually enable FIFO underrun interrupts as part of the
-        * CRTC enable sequence during modesets.  But when we inherit a
-        * valid pipe configuration from the BIOS we need to take care
-        * of enabling them on the CRTC's first fastset.
-        */
-       if (new_crtc_state->update_pipe && !modeset &&
-           old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
-               intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
-}
-
-static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
-                                              struct intel_atomic_state *state,
-                                              struct intel_crtc_state *old_crtc_state,
-                                              struct intel_crtc_state *new_crtc_state)
-{
-       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-       struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
-       struct intel_crtc_state *new_slave_crtc_state =
-               intel_atomic_get_new_crtc_state(state, slave_crtc);
-       struct intel_crtc_state *old_slave_crtc_state =
-               intel_atomic_get_old_crtc_state(state, slave_crtc);
-
-       drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
-                   !old_slave_crtc_state);
-
-       drm_dbg_kms(&i915->drm,
-                   "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
-                   crtc->base.base.id, crtc->base.name,
-                   slave_crtc->base.base.id, slave_crtc->base.name);
-
-       /* Enable seq for slave with with DP_TP_CTL left Idle until the
-        * master is ready
-        */
-       intel_crtc_enable_trans_port_sync(slave_crtc,
-                                         state,
-                                         new_slave_crtc_state);
-
-       /* Enable seq for master with with DP_TP_CTL left Idle */
-       intel_crtc_enable_trans_port_sync(crtc,
-                                         state,
-                                         new_crtc_state);
-
-       /* Set Slave's DP_TP_CTL to Normal */
-       intel_set_dp_tp_ctl_normal(slave_crtc,
-                                  state);
-
-       /* Set Master's DP_TP_CTL To Normal */
-       usleep_range(200, 400);
-       intel_set_dp_tp_ctl_normal(crtc,
-                                  state);
-
-       /* Now do the post crtc enable for all master and slaves */
-       intel_post_crtc_enable_updates(slave_crtc,
-                                      state);
-       intel_post_crtc_enable_updates(crtc,
-                                      state);
-}
-
 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15365,8 +15276,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);
 
-                       intel_update_crtc(crtc, state, old_crtc_state,
-                                         new_crtc_state);
+                       intel_update_crtc(state, crtc);
 
                        /*
                         * If this is an already active pipe, it's DDB changed,
@@ -15381,67 +15291,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
                }
        }
 
+       update_pipes = modeset_pipes;
+
        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
+       for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;
 
                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;
 
                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
-                   is_trans_port_sync_slave(new_crtc_state))
+                   is_trans_port_sync_master(new_crtc_state))
                        continue;
 
-               drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
-                                                                       entries, I915_MAX_PIPES, pipe));
-
-               entries[pipe] = new_crtc_state->wm.skl.ddb;
                modeset_pipes &= ~BIT(pipe);
 
-               if (is_trans_port_sync_mode(new_crtc_state)) {
-                       struct intel_crtc *slave_crtc;
+               intel_enable_crtc(state, crtc);
+       }
 
-                       intel_update_trans_port_sync_crtcs(crtc, state,
-                                                          old_crtc_state,
-                                                          new_crtc_state);
+       /*
+        * Then we enable all remaining pipes that depend on other
+        * pipes: MST slaves and port sync masters.
+        */
+       for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+               enum pipe pipe = crtc->pipe;
 
-                       slave_crtc = intel_get_slave_crtc(new_crtc_state);
-                       /* TODO: update entries[] of slave */
-                       modeset_pipes &= ~BIT(slave_crtc->pipe);
+               if ((modeset_pipes & BIT(pipe)) == 0)
+                       continue;
 
-               } else {
-                       intel_update_crtc(crtc, state, old_crtc_state,
-                                         new_crtc_state);
-               }
+               modeset_pipes &= ~BIT(pipe);
+
+               intel_enable_crtc(state, crtc);
        }
 
        /*
-        * Finally enable all pipes that needs a modeset and depends on
-        * other pipes, right now it is only MST slaves as both port sync slave
-        * and master are enabled together
+        * Finally we do the plane updates/etc. for all pipes that got enabled.
         */
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
+       for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;
 
-               if ((modeset_pipes & BIT(pipe)) == 0)
+               if ((update_pipes & BIT(pipe)) == 0)
                        continue;
 
                drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                                        entries, I915_MAX_PIPES, pipe));
 
                entries[pipe] = new_crtc_state->wm.skl.ddb;
-               modeset_pipes &= ~BIT(pipe);
+               update_pipes &= ~BIT(pipe);
 
-               intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+               intel_update_crtc(state, crtc);
        }
 
        drm_WARN_ON(&dev_priv->drm, modeset_pipes);
-
+       drm_WARN_ON(&dev_priv->drm, update_pipes);
 }
 
 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15540,16 +15445,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
                intel_set_cdclk_pre_plane_update(state);
 
-               /*
-                * SKL workaround: bspec recommends we disable the SAGV when we
-                * have more then one pipe enabled
-                */
-               if (!intel_can_enable_sagv(state))
-                       intel_disable_sagv(dev_priv);
-
                intel_modeset_verify_disabled(dev_priv, state);
        }
 
+       intel_sagv_pre_plane_update(state);
+
        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);
@@ -15645,8 +15545,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
        if (state->modeset)
                intel_verify_planes(state);
 
-       if (state->modeset && intel_can_enable_sagv(state))
-               intel_enable_sagv(dev_priv);
+       intel_sagv_post_plane_update(state);
 
        drm_atomic_helper_commit_hw_done(&state->base);
 
@@ -15982,7 +15881,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
        if (new_plane_state->uapi.fence) { /* explicit fencing */
                ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
                                                    new_plane_state->uapi.fence,
-                                                   I915_FENCE_TIMEOUT,
+                                                   i915_fence_timeout(dev_priv),
                                                    GFP_KERNEL);
                if (ret < 0)
                        return ret;
@@ -16009,7 +15908,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 
                ret = i915_sw_fence_await_reservation(&state->commit_ready,
                                                      obj->base.resv, NULL,
-                                                     false, I915_FENCE_TIMEOUT,
+                                                     false,
+                                                     i915_fence_timeout(dev_priv),
                                                      GFP_KERNEL);
                if (ret < 0)
                        goto unpin_fb;
@@ -18261,11 +18161,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;
 
+                       /* FIXME NULL atomic state passed! */
                        if (encoder->disable)
-                               encoder->disable(encoder, crtc_state,
+                               encoder->disable(NULL, encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
-                               encoder->post_disable(encoder, crtc_state,
+                               encoder->post_disable(NULL, encoder, crtc_state,
                                                      connector->base.state);
 
                        connector->base.state->best_encoder = best_encoder;
@@ -18802,15 +18703,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
-static bool
-has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
-{
-       if (cpu_transcoder == TRANSCODER_EDP)
-               return HAS_TRANSCODER_EDP(dev_priv);
-       else
-               return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
-}
-
 struct intel_display_error_state {
 
        u32 power_well_driver;
@@ -18919,7 +18811,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
        for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
                enum transcoder cpu_transcoder = transcoders[i];
 
-               if (!has_transcoder(dev_priv, cpu_transcoder))
+               if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
                        continue;
 
                error->transcoder[i].available = true;
index adb1225a34809cee9e67b84e76d093a6232a0113..efb4da205ea292e7a7cecb819249bc1694e66223 100644 (file)
@@ -320,9 +320,13 @@ enum phy_fia {
        for_each_pipe(__dev_priv, __p) \
                for_each_if((__mask) & BIT(__p))
 
-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+#define for_each_cpu_transcoder(__dev_priv, __t) \
        for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++)  \
-               for_each_if ((__mask) & (1 << (__t)))
+               for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+       for_each_cpu_transcoder(__dev_priv, __t) \
+               for_each_if ((__mask) & BIT(__t))
 
 #define for_each_universal_plane(__dev_priv, __pipe, __p)              \
        for ((__p) = 0;                                                 \
@@ -579,13 +583,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
 enum intel_display_power_domain
 intel_aux_power_domain(struct intel_digital_port *dig_port);
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
 
 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
index 1e6eb7f2f72dbf7afa3d0be9a466c38b8cce0225..70525623bcdf094611c977d32851e11dcc507f5e 100644 (file)
@@ -9,6 +9,7 @@
 #include "i915_debugfs.h"
 #include "intel_csr.h"
 #include "intel_display_debugfs.h"
+#include "intel_display_power.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_fbc.h"
@@ -631,15 +632,9 @@ static void intel_dp_info(struct seq_file *m,
 }
 
 static void intel_dp_mst_info(struct seq_file *m,
-                         struct intel_connector *intel_connector)
+                             struct intel_connector *intel_connector)
 {
-       struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
-       struct intel_dp_mst_encoder *intel_mst =
-               enc_to_mst(intel_encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                       intel_connector->port);
+       bool has_audio = intel_connector->port->has_audio;
 
        seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
 }
@@ -1149,6 +1144,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
        return 0;
 }
 
+#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
+                               seq_puts(m, "LPSP: disabled\n"))
+
+static bool
+intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+                             enum i915_power_well_id power_well_id)
+{
+       intel_wakeref_t wakeref;
+       bool is_enabled;
+
+       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+       is_enabled = intel_display_power_well_is_enabled(i915,
+                                                        power_well_id);
+       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+       return is_enabled;
+}
+
+static int i915_lpsp_status(struct seq_file *m, void *unused)
+{
+       struct drm_i915_private *i915 = node_to_i915(m->private);
+
+       switch (INTEL_GEN(i915)) {
+       case 12:
+       case 11:
+               LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
+               break;
+       case 10:
+       case 9:
+               LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
+               break;
+       default:
+               /*
+                * Apart from HASWELL/BROADWELL, no other legacy platform
+                * supports LPSP.
+                */
+               if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+                       LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
+               else
+                       seq_puts(m, "LPSP: not supported\n");
+       }
+
+       return 0;
+}
+
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1326,6 +1366,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
+                       } else if (intel_dp->compliance.test_type ==
+                                  DP_TEST_LINK_PHY_TEST_PATTERN) {
+                               seq_printf(m, "pattern: %d\n",
+                                          intel_dp->compliance.test_data.phytest.phy_pattern);
+                               seq_printf(m, "Number of lanes: %d\n",
+                                          intel_dp->compliance.test_data.phytest.num_lanes);
+                               seq_printf(m, "Link Rate: %d\n",
+                                          intel_dp->compliance.test_data.phytest.link_rate);
+                               seq_printf(m, "level: %02x\n",
+                                          intel_dp->train_set[0]);
                        }
                } else
                        seq_puts(m, "0");
@@ -1358,7 +1408,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 
                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(encoder);
-                       seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+                       seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
                } else
                        seq_puts(m, "0");
        }
@@ -1906,6 +1956,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
        {"i915_ddb_info", i915_ddb_info, 0},
        {"i915_drrs_status", i915_drrs_status, 0},
+       {"i915_lpsp_status", i915_lpsp_status, 0},
 };
 
 static const struct {
@@ -1927,7 +1978,7 @@ static const struct {
        {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
 };
 
-int intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct drm_i915_private *i915)
 {
        struct drm_minor *minor = i915->drm.primary;
        int i;
@@ -1940,9 +1991,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915)
                                    intel_display_debugfs_files[i].fops);
        }
 
-       return drm_debugfs_create_files(intel_display_debugfs_list,
-                                       ARRAY_SIZE(intel_display_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(intel_display_debugfs_list,
+                                ARRAY_SIZE(intel_display_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 
 static int i915_panel_show(struct seq_file *m, void *data)
@@ -1987,6 +2038,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 
+#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
+                               seq_puts(m, "LPSP: incapable\n"))
+
+static int i915_lpsp_capability_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct intel_encoder *encoder =
+                       intel_attached_encoder(to_intel_connector(connector));
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       switch (INTEL_GEN(i915)) {
+       case 12:
+               /*
+                * TGL can actually drive LPSP on ports up to DDI_C,
+                * but no TGL SKU has a physically connected DDI_C,
+                * and the driver does not even initialize DDI_C on gen12.
+                */
+               LPSP_CAPABLE(encoder->port <= PORT_B);
+               break;
+       case 11:
+               LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+                            connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+               break;
+       case 10:
+       case 9:
+               LPSP_CAPABLE(encoder->port == PORT_A &&
+                            (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+                            connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
+                            connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+               break;
+       default:
+               if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+                       LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+       }
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
+
 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 {
        struct drm_connector *connector = m->private;
@@ -2130,5 +2223,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
                debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
                                    connector, &i915_dsc_fec_support_fops);
 
+       /* Legacy panels don't support LPSP on any platform */
+       if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
+            IS_BROADWELL(dev_priv)) &&
+            (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+            connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+            connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+            connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+            connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+               debugfs_create_file("i915_lpsp_capability", 0444, root,
+                                   connector, &i915_lpsp_capability_fops);
+
        return 0;
 }
index a3bea1ce04c2316f352c5efc32e7882eaa7b3d08..c922c1745bfe170d91af1f857d74db23306e9401 100644 (file)
@@ -10,10 +10,10 @@ struct drm_connector;
 struct drm_i915_private;
 
 #ifdef CONFIG_DEBUG_FS
-int intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct drm_i915_private *i915);
 int intel_connector_debugfs_add(struct drm_connector *connector);
 #else
-static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
 static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
 #endif
 
index 84ecf8e58523d4e9a60bcd03c834e1d39b968e54..49998906cc618191ec438d4d1556c954d7519556 100644 (file)
@@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "GT_IRQ";
        case POWER_DOMAIN_DPLL_DC_OFF:
                return "DPLL_DC_OFF";
+       case POWER_DOMAIN_TC_COLD_OFF:
+               return "TC_COLD_OFF";
        default:
                MISSING_CASE(domain);
                return "?";
@@ -282,8 +284,51 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
                gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 }
 
+#define ICL_AUX_PW_TO_CH(pw_idx)       \
+       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx)   \
+       ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+                                    struct i915_power_well *power_well)
+{
+       int pw_idx = power_well->desc->hsw.idx;
+
+       return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+                                                ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+                      enum aux_ch aux_ch)
+{
+       struct intel_digital_port *dig_port = NULL;
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               /* We'll check the MST primary port */
+               if (encoder->type == INTEL_OUTPUT_DP_MST)
+                       continue;
+
+               dig_port = enc_to_dig_port(encoder);
+               if (!dig_port)
+                       continue;
+
+               if (dig_port->aux_ch != aux_ch) {
+                       dig_port = NULL;
+                       continue;
+               }
+
+               break;
+       }
+
+       return dig_port;
+}
+
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
+                                          struct i915_power_well *power_well,
+                                          bool timeout_expected)
 {
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
@@ -294,8 +339,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
                drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
                            power_well->desc->name);
 
-               /* An AUX timeout is expected if the TBT DP tunnel is down. */
-               drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
+               drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+
        }
 }
 
@@ -358,11 +403,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
-       bool wait_fuses = power_well->desc->hsw.has_fuses;
-       enum skl_power_gate uninitialized_var(pg);
        u32 val;
 
-       if (wait_fuses) {
+       if (power_well->desc->hsw.has_fuses) {
+               enum skl_power_gate pg;
+
                pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                 SKL_PW_CTL_IDX_TO_PG(pw_idx);
                /*
@@ -379,19 +424,27 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-       hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+       hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 
        /* Display WA #1178: cnl */
        if (IS_CANNONLAKE(dev_priv) &&
            pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
            pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+               u32 val;
+
                val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
                val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
                intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
        }
 
-       if (wait_fuses)
+       if (power_well->desc->hsw.has_fuses) {
+               enum skl_power_gate pg;
+
+               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+                                                SKL_PW_CTL_IDX_TO_PG(pw_idx);
                gen9_wait_for_power_well_fuses(dev_priv, pg);
+       }
 
        hsw_power_well_post_enable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask,
@@ -437,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                               val | ICL_LANE_ENABLE_AUX);
        }
 
-       hsw_wait_for_power_well_enable(dev_priv, power_well);
+       hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 
        /* Display WA #1178: icl */
        if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
@@ -470,21 +523,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
        hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
-#define ICL_AUX_PW_TO_CH(pw_idx)       \
-       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx)   \
-       ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
-                                    struct i915_power_well *power_well)
-{
-       int pw_idx = power_well->desc->hsw.idx;
-
-       return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
-                                                ICL_AUX_PW_TO_CH(pw_idx);
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 
 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
@@ -501,51 +539,28 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
 }
 
 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
+                                       struct i915_power_well *power_well,
+                                       struct intel_digital_port *dig_port)
 {
-       enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-       struct intel_digital_port *dig_port = NULL;
-       struct intel_encoder *encoder;
-
        /* Bypass the check if all references are released asynchronously */
        if (power_well_async_ref_count(dev_priv, power_well) ==
            power_well->count)
                return;
 
-       aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-               if (!intel_phy_is_tc(dev_priv, phy))
-                       continue;
-
-               /* We'll check the MST primary port */
-               if (encoder->type == INTEL_OUTPUT_DP_MST)
-                       continue;
-
-               dig_port = enc_to_dig_port(encoder);
-               if (drm_WARN_ON(&dev_priv->drm, !dig_port))
-                       continue;
-
-               if (dig_port->aux_ch != aux_ch) {
-                       dig_port = NULL;
-                       continue;
-               }
-
-               break;
-       }
-
        if (drm_WARN_ON(&dev_priv->drm, !dig_port))
                return;
 
+       if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
+               return;
+
        drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
 }
 
 #else
 
 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
+                                       struct i915_power_well *power_well,
+                                       struct intel_digital_port *dig_port)
 {
 }
 
@@ -553,24 +568,65 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 
 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)  ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
 
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+       int ret, tries = 0;
+
+       while (1) {
+               ret = sandybridge_pcode_write_timeout(i915,
+                                                     ICL_PCODE_EXIT_TCCOLD,
+                                                     0, 250, 1);
+               if (ret != -EAGAIN || ++tries == 3)
+                       break;
+               msleep(1);
+       }
+
+       /* Spec states that TC cold exit can take up to 1ms to complete */
+       if (!ret)
+               msleep(1);
+
+       /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
+       drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+                   "succeeded");
+}
+
 static void
 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
 {
        enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+       struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+       bool timeout_expected;
        u32 val;
 
-       icl_tc_port_assert_ref_held(dev_priv, power_well);
+       icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 
        val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
        val &= ~DP_AUX_CH_CTL_TBT_IO;
-       if (power_well->desc->hsw.is_tc_tbt)
+       if (is_tbt)
                val |= DP_AUX_CH_CTL_TBT_IO;
        intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
 
-       hsw_power_well_enable(dev_priv, power_well);
+       val = intel_de_read(dev_priv, regs->driver);
+       intel_de_write(dev_priv, regs->driver,
+                      val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
+
+       /*
+        * An AUX timeout is expected if the TBT DP tunnel is down,
+        * or need to enable AUX on a legacy TypeC port as part of the TC-cold
+        * exit sequence.
+        */
+       timeout_expected = is_tbt;
+       if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
+               icl_tc_cold_exit(dev_priv);
+               timeout_expected = true;
+       }
+
+       hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
 
-       if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+       if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
                enum tc_port tc_port;
 
                tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -588,11 +644,48 @@ static void
 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
 {
-       icl_tc_port_assert_ref_held(dev_priv, power_well);
+       enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+       struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+
+       icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 
        hsw_power_well_disable(dev_priv, power_well);
 }
 
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+                         struct i915_power_well *power_well)
+{
+       int pw_idx = power_well->desc->hsw.idx;
+       enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
+       bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+       if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+               return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+       else if (IS_ICELAKE(dev_priv))
+               return icl_combo_phy_aux_power_well_enable(dev_priv,
+                                                          power_well);
+       else
+               return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+                          struct i915_power_well *power_well)
+{
+       int pw_idx = power_well->desc->hsw.idx;
+       enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
+       bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+       if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+               return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
+       else if (IS_ICELAKE(dev_priv))
+               return icl_combo_phy_aux_power_well_disable(dev_priv,
+                                                           power_well);
+       else
+               return hsw_power_well_disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -943,7 +1036,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 
        /* Power wells at this level and above must be disabled for DC5 entry */
        if (INTEL_GEN(dev_priv) >= 12)
-               high_pg = TGL_DISP_PW_3;
+               high_pg = ICL_DISP_PW_3;
        else
                high_pg = SKL_DISP_PW_2;
 
@@ -1873,20 +1966,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
 static void print_power_domains(struct i915_power_domains *power_domains,
                                const char *prefix, u64 mask)
 {
+       struct drm_i915_private *i915 = container_of(power_domains,
+                                                    struct drm_i915_private,
+                                                    power_domains);
        enum intel_display_power_domain domain;
 
-       DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+       drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
        for_each_power_domain(domain, mask)
-               DRM_DEBUG_DRIVER("%s use_count %d\n",
-                                intel_display_power_domain_str(domain),
-                                power_domains->domain_use_count[domain]);
+               drm_dbg(&i915->drm, "%s use_count %d\n",
+                       intel_display_power_domain_str(domain),
+                       power_domains->domain_use_count[domain]);
 }
 
 static void
 print_async_put_domains_state(struct i915_power_domains *power_domains)
 {
-       DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
-                        power_domains->async_put_wakeref);
+       struct drm_i915_private *i915 = container_of(power_domains,
+                                                    struct drm_i915_private,
+                                                    power_domains);
+
+       drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+               power_domains->async_put_wakeref);
 
        print_power_domains(power_domains, "async_put_domains[0]",
                            power_domains->async_put_domains[0]);
@@ -2798,6 +2898,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (      \
        BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
 
+#define TGL_TC_COLD_OFF_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_AUX_D)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_E)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_F)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_G)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_H)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_I)     |       \
+       BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |       \
+       BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+
 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = i9xx_always_on_power_well_noop,
@@ -3496,17 +3611,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
        },
 };
 
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = icl_combo_phy_aux_power_well_enable,
-       .disable = icl_combo_phy_aux_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+static const struct i915_power_well_ops icl_aux_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
-       .enable = icl_tc_phy_aux_power_well_enable,
-       .disable = icl_tc_phy_aux_power_well_disable,
+       .enable = icl_aux_power_well_enable,
+       .disable = icl_aux_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
 };
 
@@ -3564,7 +3672,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
                .name = "power well 3",
                .domains = ICL_PW_3_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
+               .id = ICL_DISP_PW_3,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -3636,7 +3744,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX A",
                .domains = ICL_AUX_A_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3646,7 +3754,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX B",
                .domains = ICL_AUX_B_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3656,7 +3764,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX C TC1",
                .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3667,7 +3775,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX D TC2",
                .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3678,7 +3786,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX E TC3",
                .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3689,7 +3797,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX F TC4",
                .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3700,7 +3808,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX C TBT1",
                .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3711,7 +3819,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX D TBT2",
                .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3722,7 +3830,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX E TBT3",
                .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3733,7 +3841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "AUX F TBT4",
                .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3755,149 +3863,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        },
 };
 
-static const struct i915_power_well_desc ehl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_DC_OFF,
-       },
-       {
-               .name = "power well 2",
-               .domains = ICL_PW_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "power well 3",
-               .domains = ICL_PW_3_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A IO",
-               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO",
-               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO",
-               .domains = ICL_DDI_IO_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO",
-               .domains = ICL_DDI_IO_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = ICL_AUX_A_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = ICL_AUX_B_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
-               },
-       },
-       {
-               .name = "AUX D",
-               .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
-               },
-       },
-       {
-               .name = "power well 4",
-               .domains = ICL_PW_4_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
-                       .hsw.has_fuses = true,
-                       .hsw.irq_pipe_mask = BIT(PIPE_C),
-               },
-       },
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+       u8 tries = 0;
+       int ret;
+
+       while (1) {
+               u32 low_val = 0, high_val;
+
+               if (block)
+                       high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
+               else
+                       high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
+
+               /*
+                * The spec states that we should time out the request after 200us,
+                * but the function below will time out after 500us.
+                */
+               ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
+                                            &high_val);
+               if (ret == 0) {
+                       if (block &&
+                           (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+                               ret = -EIO;
+                       else
+                               break;
+               }
+
+               if (++tries == 3)
+                       break;
+
+               if (ret == -EAGAIN)
+                       msleep(1);
+       }
+
+       if (ret)
+               drm_err(&i915->drm, "TC cold %sblock failed\n",
+                       block ? "" : "un");
+       else
+               drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+                           block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+                                 struct i915_power_well *power_well)
+{
+       tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+                                  struct i915_power_well *power_well)
+{
+       tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+                                  struct i915_power_well *power_well)
+{
+       if (power_well->count > 0)
+               tgl_tc_cold_off_power_well_enable(i915, power_well);
+       else
+               tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+                                     struct i915_power_well *power_well)
+{
+       /*
+        * Not the correct implementation, but there is no way to just read it
+        * from PCODE, so returning count to avoid state mismatch errors
+        */
+       return power_well->count;
+}
+
+static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+       .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+       .enable = tgl_tc_cold_off_power_well_enable,
+       .disable = tgl_tc_cold_off_power_well_disable,
+       .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
 };
 
 static const struct i915_power_well_desc tgl_power_wells[] = {
@@ -3942,7 +3990,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
                .name = "power well 3",
                .domains = TGL_PW_3_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
-               .id = TGL_DISP_PW_3,
+               .id = ICL_DISP_PW_3,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4044,7 +4092,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX A",
                .domains = TGL_AUX_A_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4054,7 +4102,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX B",
                .domains = TGL_AUX_B_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4064,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX C",
                .domains = TGL_AUX_C_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4074,7 +4122,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX D TC1",
                .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4085,7 +4133,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX E TC2",
                .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4096,7 +4144,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX F TC3",
                .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4107,7 +4155,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX G TC4",
                .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4118,7 +4166,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX H TC5",
                .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4129,7 +4177,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX I TC6",
                .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4140,7 +4188,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX D TBT1",
                .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4151,7 +4199,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX E TBT2",
                .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4162,7 +4210,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX F TBT3",
                .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4173,7 +4221,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX G TBT4",
                .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4184,7 +4232,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX H TBT5",
                .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4195,7 +4243,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX I TBT6",
                .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
+               .ops = &icl_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4227,6 +4275,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
                        .hsw.irq_pipe_mask = BIT(PIPE_D),
                },
        },
+       {
+               .name = "TC cold off",
+               .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+               .ops = &tgl_tc_cold_off_ops,
+               .id = DISP_PW_ID_NONE,
+       },
 };
 
 static int
@@ -4376,8 +4430,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         */
        if (IS_GEN(dev_priv, 12)) {
                err = set_power_wells(power_domains, tgl_power_wells);
-       } else if (IS_ELKHARTLAKE(dev_priv)) {
-               err = set_power_wells(power_domains, ehl_power_wells);
        } else if (IS_GEN(dev_priv, 11)) {
                err = set_power_wells(power_domains, icl_power_wells);
        } else if (IS_CANNONLAKE(dev_priv)) {
@@ -4439,9 +4491,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
        mutex_unlock(&power_domains->lock);
 }
 
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
-                         i915_reg_t reg, bool enable)
+static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+                                i915_reg_t reg, bool enable)
 {
        u32 val, status;
 
@@ -4480,7 +4531,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
        drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
                 "Invalid number of dbuf slices requested\n");
 
-       DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+       drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+                   req_slices);
 
        /*
         * Might be running this in parallel to gen9_dc_off_power_well_enable
@@ -5016,7 +5068,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
        const struct buddy_page_mask *table;
        int i;
 
-       if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+       if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
                /* Wa_1409767108: tgl */
                table = wa_1409767108_buddy_page_masks;
        else
index da64a5edae7ad14c75f3519bc14771ea8bf8f141..6c917699293b13704e4c190b76ad3f7baabf0d74 100644 (file)
@@ -76,6 +76,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_MODESET,
        POWER_DOMAIN_GT_IRQ,
        POWER_DOMAIN_DPLL_DC_OFF,
+       POWER_DOMAIN_TC_COLD_OFF,
        POWER_DOMAIN_INIT,
 
        POWER_DOMAIN_NUM,
@@ -100,7 +101,7 @@ enum i915_power_well_id {
        SKL_DISP_PW_MISC_IO,
        SKL_DISP_PW_1,
        SKL_DISP_PW_2,
-       TGL_DISP_PW_3,
+       ICL_DISP_PW_3,
        SKL_DISP_DC_OFF,
 };
 
@@ -266,6 +267,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);
 
 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+                                        enum i915_power_well_id power_well_id);
 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain);
 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
index 5e00e611f077f80a29fe048551f7b732f6b78a7c..2bf3d4cb4ea983c872a9f19565f1bb953b9ab30c 100644 (file)
@@ -132,8 +132,7 @@ struct intel_encoder {
        u16 cloneable;
        u8 pipe_mask;
        enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
-                                           struct intel_connector *connector,
-                                           bool irq_received);
+                                           struct intel_connector *connector);
        enum intel_output_type (*compute_output_type)(struct intel_encoder *,
                                                      struct intel_crtc_state *,
                                                      struct drm_connector_state *);
@@ -146,28 +145,35 @@ struct intel_encoder {
        void (*update_prepare)(struct intel_atomic_state *,
                               struct intel_encoder *,
                               struct intel_crtc *);
-       void (*pre_pll_enable)(struct intel_encoder *,
+       void (*pre_pll_enable)(struct intel_atomic_state *,
+                              struct intel_encoder *,
                               const struct intel_crtc_state *,
                               const struct drm_connector_state *);
-       void (*pre_enable)(struct intel_encoder *,
+       void (*pre_enable)(struct intel_atomic_state *,
+                          struct intel_encoder *,
                           const struct intel_crtc_state *,
                           const struct drm_connector_state *);
-       void (*enable)(struct intel_encoder *,
+       void (*enable)(struct intel_atomic_state *,
+                      struct intel_encoder *,
                       const struct intel_crtc_state *,
                       const struct drm_connector_state *);
        void (*update_complete)(struct intel_atomic_state *,
                                struct intel_encoder *,
                                struct intel_crtc *);
-       void (*disable)(struct intel_encoder *,
+       void (*disable)(struct intel_atomic_state *,
+                       struct intel_encoder *,
                        const struct intel_crtc_state *,
                        const struct drm_connector_state *);
-       void (*post_disable)(struct intel_encoder *,
+       void (*post_disable)(struct intel_atomic_state *,
+                            struct intel_encoder *,
                             const struct intel_crtc_state *,
                             const struct drm_connector_state *);
-       void (*post_pll_disable)(struct intel_encoder *,
+       void (*post_pll_disable)(struct intel_atomic_state *,
+                                struct intel_encoder *,
                                 const struct intel_crtc_state *,
                                 const struct drm_connector_state *);
-       void (*update_pipe)(struct intel_encoder *,
+       void (*update_pipe)(struct intel_atomic_state *,
+                           struct intel_encoder *,
                            const struct intel_crtc_state *,
                            const struct drm_connector_state *);
        /* Read out the current hw state of this connector, returning true if
@@ -425,11 +431,14 @@ struct intel_connector {
        struct edid *edid;
        struct edid *detect_edid;
 
+       /* Number of times hotplug detection was tried after an HPD interrupt */
+       int hotplug_retries;
+
        /* since POLL and HPD connectors may use the same HPD line keep the native
           state of connector->polled in case hotplug storm detection changes it */
        u8 polled;
 
-       void *port; /* store this opaque as its illegal to dereference it */
+       struct drm_dp_mst_port *port;
 
        struct intel_dp *mst_port;
 
@@ -640,6 +649,16 @@ struct intel_crtc_scaler_state {
 #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
 /* Flag to use the scanline counter instead of the pixel counter */
 #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * TE0 or TE1 flag is set if the crtc has a DSI encoder which
+ * is operating in command mode.
+ * Flag to use TE from DSI0 instead of VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
 
 struct intel_wm_level {
        bool enable;
@@ -669,11 +688,13 @@ struct skl_plane_wm {
        struct skl_wm_level wm[8];
        struct skl_wm_level uv_wm[8];
        struct skl_wm_level trans_wm;
+       struct skl_wm_level sagv_wm0;
        bool is_planar;
 };
 
 struct skl_pipe_wm {
        struct skl_plane_wm planes[I915_MAX_PLANES];
+       bool use_sagv_wm;
 };
 
 enum vlv_wm_level {
@@ -955,8 +976,7 @@ struct intel_crtc_state {
 
        /* Panel fitter placement and size for Ironlake+ */
        struct {
-               u32 pos;
-               u32 size;
+               struct drm_rect dst;
                bool enabled;
                bool force_thru;
        } pch_pfit;
@@ -1015,6 +1035,7 @@ struct intel_crtc_state {
                union hdmi_infoframe spd;
                union hdmi_infoframe hdmi;
                union hdmi_infoframe drm;
+               struct drm_dp_vsc_sdp vsc;
        } infoframes;
 
        /* HDMI scrambling status */
@@ -1238,6 +1259,7 @@ struct intel_dp_compliance_data {
        u8 video_pattern;
        u16 hdisplay, vdisplay;
        u8 bpc;
+       struct drm_dp_phy_test_params phytest;
 };
 
 struct intel_dp_compliance {
@@ -1347,6 +1369,9 @@ struct intel_dp {
 
        /* This is called before link training is started */
        void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+       void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
+       void (*set_idle_link_train)(struct intel_dp *intel_dp);
+       void (*set_signal_levels)(struct intel_dp *intel_dp);
 
        /* Displayport compliance testing */
        struct intel_dp_compliance compliance;
@@ -1401,6 +1426,7 @@ struct intel_digital_port {
                               const struct drm_connector_state *conn_state);
        u32 (*infoframes_enabled)(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config);
+       bool (*connected)(struct intel_encoder *encoder);
 };
 
 struct intel_dp_mst_encoder {
index a2fafd4499f2a67d1b2ae2dfd2748d3f38013268..40d42dcff0b7d68afd17e5b3912818a41c010f3c 100644 (file)
@@ -48,7 +48,6 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
-#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
@@ -164,6 +163,17 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
        };
        int i, max_rate;
 
+       if (drm_dp_has_quirk(&intel_dp->desc, 0,
+                            DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+               /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
+               static const int quirk_rates[] = { 162000, 270000, 324000 };
+
+               memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
+               intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
+
+               return;
+       }
+
        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 
        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
@@ -452,6 +462,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int index;
 
        index = intel_dp_rate_index(intel_dp->common_rates,
@@ -462,7 +473,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
-                       DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+                       drm_dbg_kms(&i915->drm,
+                                   "Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
@@ -472,13 +484,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
-                       DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+                       drm_dbg_kms(&i915->drm,
+                                   "Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
-               DRM_ERROR("Link Training Unsuccessful\n");
+               drm_err(&i915->drm, "Link Training Unsuccessful\n");
                return -1;
        }
 
@@ -553,6 +566,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                                       int mode_clock, int mode_hdisplay)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 min_slice_count, i;
        int max_slice_width;
 
@@ -565,8 +579,9 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
 
        max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
        if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
-               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
-                             max_slice_width);
+               drm_dbg_kms(&i915->drm,
+                           "Unsupported slice width %d by DP DSC Sink device\n",
+                           max_slice_width);
                return 0;
        }
        /* Also take into account max slice width */
@@ -584,7 +599,8 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                        return valid_dsc_slicecount[i];
        }
 
-       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+       drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+                   min_slice_count);
        return 0;
 }
 
@@ -1343,8 +1359,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
        bool is_tc_port = intel_phy_is_tc(i915, phy);
        i915_reg_t ch_ctl, ch_data[5];
        u32 aux_clock_divider;
-       enum intel_display_power_domain aux_domain =
-               intel_aux_power_domain(intel_dig_port);
+       enum intel_display_power_domain aux_domain;
        intel_wakeref_t aux_wakeref;
        intel_wakeref_t pps_wakeref;
        int i, ret, recv_bytes;
@@ -1359,6 +1374,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
        if (is_tc_port)
                intel_tc_port_lock(intel_dig_port);
 
+       aux_domain = intel_aux_power_domain(intel_dig_port);
+
        aux_wakeref = intel_display_power_get(i915, aux_domain);
        pps_wakeref = pps_lock(intel_dp);
 
@@ -1832,6 +1849,7 @@ static void snprintf_int_array(char *str, size_t len,
 
 static void intel_dp_print_rates(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        char str[128]; /* FIXME: too big for stack? */
 
        if (!drm_debug_enabled(DRM_UT_KMS))
@@ -1839,15 +1857,15 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 
        snprintf_int_array(str, sizeof(str),
                           intel_dp->source_rates, intel_dp->num_source_rates);
-       DRM_DEBUG_KMS("source rates: %s\n", str);
+       drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
 
        snprintf_int_array(str, sizeof(str),
                           intel_dp->sink_rates, intel_dp->num_sink_rates);
-       DRM_DEBUG_KMS("sink rates: %s\n", str);
+       drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
 
        snprintf_int_array(str, sizeof(str),
                           intel_dp->common_rates, intel_dp->num_common_rates);
-       DRM_DEBUG_KMS("common rates: %s\n", str);
+       drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
 }
 
 int
@@ -1954,6 +1972,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
                                  struct link_config_limits *limits)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
        /* For DP Compliance we override the computed bpp for the pipe */
        if (intel_dp->compliance.test_data.bpc != 0) {
                int bpp = 3 * intel_dp->compliance.test_data.bpc;
@@ -1961,7 +1981,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
                limits->min_bpp = limits->max_bpp = bpp;
                pipe_config->dither_force_disable = bpp == 6 * 3;
 
-               DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
+               drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
        }
 
        /* Use values requested by Compliance Test Request */
@@ -2055,6 +2075,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
                                       struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        u8 line_buf_depth;
@@ -2089,7 +2110,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
 
        line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
        if (!line_buf_depth) {
-               DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+               drm_dbg_kms(&i915->drm,
+                           "DSC Sink Line Buffer Depth invalid\n");
                return -EINVAL;
        }
 
@@ -2114,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode =
+               &pipe_config->hw.adjusted_mode;
        u8 dsc_max_bpc;
        int pipe_bpp;
        int ret;
@@ -2229,7 +2252,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state)
 {
-       struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       const struct drm_display_mode *adjusted_mode =
+               &pipe_config->hw.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct link_config_limits limits;
        int common_len;
@@ -2264,11 +2289,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 
        intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
 
-       DRM_DEBUG_KMS("DP link computation with max lane count %i "
-                     "max rate %d max bpp %d pixel clock %iKHz\n",
-                     limits.max_lane_count,
-                     intel_dp->common_rates[limits.max_clock],
-                     limits.max_bpp, adjusted_mode->crtc_clock);
+       drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
+                   "max rate %d max bpp %d pixel clock %iKHz\n",
+                   limits.max_lane_count,
+                   intel_dp->common_rates[limits.max_clock],
+                   limits.max_bpp, adjusted_mode->crtc_clock);
 
        /*
         * Optimize for slow and wide. This is the place to add alternative
@@ -2277,7 +2302,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
        /* enable compression if the mode doesn't fit available BW */
-       DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+       drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
        if (ret || intel_dp->force_dsc_en) {
                ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
                                                  conn_state, &limits);
@@ -2286,40 +2311,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        }
 
        if (pipe_config->dsc.compression_enable) {
-               DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
-                             pipe_config->lane_count, pipe_config->port_clock,
-                             pipe_config->pipe_bpp,
-                             pipe_config->dsc.compressed_bpp);
-
-               DRM_DEBUG_KMS("DP link rate required %i available %i\n",
-                             intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                    pipe_config->dsc.compressed_bpp),
-                             intel_dp_max_data_rate(pipe_config->port_clock,
-                                                    pipe_config->lane_count));
+               drm_dbg_kms(&i915->drm,
+                           "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+                           pipe_config->lane_count, pipe_config->port_clock,
+                           pipe_config->pipe_bpp,
+                           pipe_config->dsc.compressed_bpp);
+
+               drm_dbg_kms(&i915->drm,
+                           "DP link rate required %i available %i\n",
+                           intel_dp_link_required(adjusted_mode->crtc_clock,
+                                                  pipe_config->dsc.compressed_bpp),
+                           intel_dp_max_data_rate(pipe_config->port_clock,
+                                                  pipe_config->lane_count));
        } else {
-               DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
-                             pipe_config->lane_count, pipe_config->port_clock,
-                             pipe_config->pipe_bpp);
+               drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
+                           pipe_config->lane_count, pipe_config->port_clock,
+                           pipe_config->pipe_bpp);
 
-               DRM_DEBUG_KMS("DP link rate required %i available %i\n",
-                             intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                    pipe_config->pipe_bpp),
-                             intel_dp_max_data_rate(pipe_config->port_clock,
-                                                    pipe_config->lane_count));
+               drm_dbg_kms(&i915->drm,
+                           "DP link rate required %i available %i\n",
+                           intel_dp_link_required(adjusted_mode->crtc_clock,
+                                                  pipe_config->pipe_bpp),
+                           intel_dp_max_data_rate(pipe_config->port_clock,
+                                                  pipe_config->lane_count));
        }
        return 0;
 }
 
 static int
 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
-                        struct drm_connector *connector,
-                        struct intel_crtc_state *crtc_state)
+                        struct intel_crtc_state *crtc_state,
+                        const struct drm_connector_state *conn_state)
 {
+       struct drm_connector *connector = conn_state->connector;
        const struct drm_display_info *info = &connector->display_info;
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       int ret;
 
        if (!drm_mode_is_420_only(info, adjusted_mode) ||
            !intel_dp_get_colorimetry_status(intel_dp) ||
@@ -2328,16 +2355,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
 
        crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
 
-       /* YCBCR 420 output conversion needs a scaler */
-       ret = skl_update_scaler_crtc(crtc_state);
-       if (ret) {
-               DRM_DEBUG_KMS("Scaler allocation for output failed\n");
-               return ret;
-       }
-
-       intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
-
-       return 0;
+       return intel_pch_panel_fitting(crtc_state, conn_state);
 }
 
 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
@@ -2384,6 +2402,164 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
        return true;
 }
 
+static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
+                                            const struct drm_connector_state *conn_state,
+                                            struct drm_dp_vsc_sdp *vsc)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /*
+        * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+        * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+        * Colorimetry Format indication.
+        */
+       vsc->revision = 0x5;
+       vsc->length = 0x13;
+
+       /* DP 1.4a spec, Table 2-120 */
+       switch (crtc_state->output_format) {
+       case INTEL_OUTPUT_FORMAT_YCBCR444:
+               vsc->pixelformat = DP_PIXELFORMAT_YUV444;
+               break;
+       case INTEL_OUTPUT_FORMAT_YCBCR420:
+               vsc->pixelformat = DP_PIXELFORMAT_YUV420;
+               break;
+       case INTEL_OUTPUT_FORMAT_RGB:
+       default:
+               vsc->pixelformat = DP_PIXELFORMAT_RGB;
+       }
+
+       switch (conn_state->colorspace) {
+       case DRM_MODE_COLORIMETRY_BT709_YCC:
+               vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+               break;
+       case DRM_MODE_COLORIMETRY_XVYCC_601:
+               vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
+               break;
+       case DRM_MODE_COLORIMETRY_XVYCC_709:
+               vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
+               break;
+       case DRM_MODE_COLORIMETRY_SYCC_601:
+               vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
+               break;
+       case DRM_MODE_COLORIMETRY_OPYCC_601:
+               vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
+               break;
+       case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+               vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
+               break;
+       case DRM_MODE_COLORIMETRY_BT2020_RGB:
+               vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
+               break;
+       case DRM_MODE_COLORIMETRY_BT2020_YCC:
+               vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
+               break;
+       case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+       case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+               vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
+               break;
+       default:
+               /*
+                * RGB->YCBCR color conversion uses the BT.709
+                * color space.
+                */
+               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+                       vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+               else
+                       vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
+               break;
+       }
+
+       vsc->bpc = crtc_state->pipe_bpp / 3;
+
+       /* only RGB pixelformat supports 6 bpc */
+       drm_WARN_ON(&dev_priv->drm,
+                   vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
+
+       /* all YCbCr are always limited range */
+       vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
+       vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+}
+
+static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
+                                    struct intel_crtc_state *crtc_state,
+                                    const struct drm_connector_state *conn_state)
+{
+       struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+
+       /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
+       if (crtc_state->has_psr)
+               return;
+
+       if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+               return;
+
+       crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+       vsc->sdp_type = DP_SDP_VSC;
+       intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+                                        &crtc_state->infoframes.vsc);
+}
+
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state,
+                                 struct drm_dp_vsc_sdp *vsc)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       vsc->sdp_type = DP_SDP_VSC;
+
+       if (dev_priv->psr.psr2_enabled) {
+               if (dev_priv->psr.colorimetry_support &&
+                   intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+                       /* [PSR2, +Colorimetry] */
+                       intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+                                                        vsc);
+               } else {
+                       /*
+                        * [PSR2, -Colorimetry]
+                        * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+                        * 3D stereo + PSR/PSR2 + Y-coordinate.
+                        */
+                       vsc->revision = 0x4;
+                       vsc->length = 0xe;
+               }
+       } else {
+               /*
+                * [PSR1]
+                * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+                * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
+                * higher).
+                */
+               vsc->revision = 0x2;
+               vsc->length = 0x8;
+       }
+}
+
+static void
+intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+                                           struct intel_crtc_state *crtc_state,
+                                           const struct drm_connector_state *conn_state)
+{
+       int ret;
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
+
+       if (!conn_state->hdr_output_metadata)
+               return;
+
+       ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
+
+       if (ret) {
+               drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+               return;
+       }
+
+       crtc_state->infoframes.enable |=
+               intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
 int
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
@@ -2394,7 +2570,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
        enum port port = encoder->port;
-       struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
@@ -2410,9 +2585,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (lspcon->active)
                lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
        else
-               ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
-                                              pipe_config);
-
+               ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
+                                              conn_state);
        if (ret)
                return ret;
 
@@ -2428,18 +2602,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
 
-               if (INTEL_GEN(dev_priv) >= 9) {
-                       ret = skl_update_scaler_crtc(pipe_config);
-                       if (ret)
-                               return ret;
-               }
-
                if (HAS_GMCH(dev_priv))
-                       intel_gmch_panel_fitting(intel_crtc, pipe_config,
-                                                conn_state->scaling_mode);
+                       ret = intel_gmch_panel_fitting(pipe_config, conn_state);
                else
-                       intel_pch_panel_fitting(intel_crtc, pipe_config,
-                                               conn_state->scaling_mode);
+                       ret = intel_pch_panel_fitting(pipe_config, conn_state);
+               if (ret)
+                       return ret;
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2489,6 +2657,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                intel_dp_set_clock(encoder, pipe_config);
 
        intel_psr_compute_config(intel_dp, pipe_config);
+       intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
+       intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
 
        return 0;
 }
@@ -2630,22 +2800,27 @@ static void wait_panel_status(struct intel_dp *intel_dp,
 
 static void wait_panel_on(struct intel_dp *intel_dp)
 {
-       DRM_DEBUG_KMS("Wait for panel power on\n");
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 }
 
 static void wait_panel_off(struct intel_dp *intel_dp)
 {
-       DRM_DEBUG_KMS("Wait for panel power off time\n");
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 }
 
 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;
 
-       DRM_DEBUG_KMS("Wait for panel power cycle\n");
+       drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
 
        /* take the difference of currrent time and panel power off time
         * and then make panel wait for t11_t12 if needed. */
@@ -3009,11 +3184,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       DRM_DEBUG_KMS("\n");
+       drm_dbg_kms(&i915->drm, "\n");
 
        intel_panel_enable_backlight(crtc_state, conn_state);
        _intel_edp_backlight_on(intel_dp);
@@ -3047,11 +3223,12 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        if (!intel_dp_is_edp(intel_dp))
                return;
 
-       DRM_DEBUG_KMS("\n");
+       drm_dbg_kms(&i915->drm, "\n");
 
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(old_conn_state);
@@ -3064,6 +3241,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 static void intel_edp_backlight_power(struct intel_connector *connector,
                                      bool enable)
 {
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        intel_wakeref_t wakeref;
        bool is_enabled;
@@ -3074,8 +3252,8 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
        if (is_enabled == enable)
                return;
 
-       DRM_DEBUG_KMS("panel power control backlight %s\n",
-                     enable ? "enable" : "disable");
+       drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+                   enable ? "enable" : "disable");
 
        if (enable)
                _intel_edp_backlight_on(intel_dp);
@@ -3185,6 +3363,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state,
                                           bool enable)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int ret;
 
        if (!crtc_state->dsc.compression_enable)
@@ -3193,13 +3372,15 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
                                 enable ? DP_DECOMPRESSION_EN : 0);
        if (ret < 0)
-               DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
-                             enable ? "enable" : "disable");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to %s sink decompression state\n",
+                           enable ? "enable" : "disable");
 }
 
 /* If the sink supports it, try to set the power state appropriately */
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int ret, i;
 
        /* Should have a valid DPCD by this point */
@@ -3232,8 +3413,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
        }
 
        if (ret != 1)
-               DRM_DEBUG_KMS("failed to %s sink power state\n",
-                             mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+               drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
+                           mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
 }
 
 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
@@ -3390,7 +3571,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static void intel_disable_dp(struct intel_encoder *encoder,
+static void intel_disable_dp(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
 {
@@ -3410,21 +3592,24 @@ static void intel_disable_dp(struct intel_encoder *encoder,
        intel_edp_panel_off(intel_dp);
 }
 
-static void g4x_disable_dp(struct intel_encoder *encoder,
+static void g4x_disable_dp(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+       intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void vlv_disable_dp(struct intel_encoder *encoder,
+static void vlv_disable_dp(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+       intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void g4x_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
@@ -3444,14 +3629,16 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
                ilk_edp_pll_off(intel_dp, old_crtc_state);
 }
 
-static void vlv_post_disable_dp(struct intel_encoder *encoder,
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
        intel_dp_link_down(encoder, old_crtc_state);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder,
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
@@ -3468,90 +3655,63 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
 }
 
 static void
-_intel_dp_set_link_train(struct intel_dp *intel_dp,
-                        u32 *DP,
-                        u8 dp_train_pat)
+cpt_set_link_train(struct intel_dp *intel_dp,
+                  u8 dp_train_pat)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum port port = intel_dig_port->base.port;
-       u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
-
-       if (dp_train_pat & train_pat_mask)
-               drm_dbg_kms(&dev_priv->drm,
-                           "Using DP training pattern TPS%d\n",
-                           dp_train_pat & train_pat_mask);
-
-       if (HAS_DDI(dev_priv)) {
-               u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
-
-               if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
-                       temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
-               else
-                       temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+       u32 *DP = &intel_dp->DP;
 
-               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-               switch (dp_train_pat & train_pat_mask) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+       *DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
-                       break;
-               case DP_TRAINING_PATTERN_4:
-                       temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
-                       break;
-               }
-               intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+       switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+       case DP_TRAINING_PATTERN_DISABLE:
+               *DP |= DP_LINK_TRAIN_OFF_CPT;
+               break;
+       case DP_TRAINING_PATTERN_1:
+               *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+               break;
+       case DP_TRAINING_PATTERN_2:
+               *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+               break;
+       case DP_TRAINING_PATTERN_3:
+               drm_dbg_kms(&dev_priv->drm,
+                           "TPS3 not supported, using TPS2 instead\n");
+               *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+               break;
+       }
 
-       } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
-                  (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
-               *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
 
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       *DP |= DP_LINK_TRAIN_OFF_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       *DP |= DP_LINK_TRAIN_PAT_1_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       drm_dbg_kms(&dev_priv->drm,
-                                   "TPS3 not supported, using TPS2 instead\n");
-                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
-                       break;
-               }
+static void
+g4x_set_link_train(struct intel_dp *intel_dp,
+                  u8 dp_train_pat)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u32 *DP = &intel_dp->DP;
 
-       } else {
-               *DP &= ~DP_LINK_TRAIN_MASK;
+       *DP &= ~DP_LINK_TRAIN_MASK;
 
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
-               case DP_TRAINING_PATTERN_DISABLE:
-                       *DP |= DP_LINK_TRAIN_OFF;
-                       break;
-               case DP_TRAINING_PATTERN_1:
-                       *DP |= DP_LINK_TRAIN_PAT_1;
-                       break;
-               case DP_TRAINING_PATTERN_2:
-                       *DP |= DP_LINK_TRAIN_PAT_2;
-                       break;
-               case DP_TRAINING_PATTERN_3:
-                       drm_dbg_kms(&dev_priv->drm,
-                                   "TPS3 not supported, using TPS2 instead\n");
-                       *DP |= DP_LINK_TRAIN_PAT_2;
-                       break;
-               }
+       switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+       case DP_TRAINING_PATTERN_DISABLE:
+               *DP |= DP_LINK_TRAIN_OFF;
+               break;
+       case DP_TRAINING_PATTERN_1:
+               *DP |= DP_LINK_TRAIN_PAT_1;
+               break;
+       case DP_TRAINING_PATTERN_2:
+               *DP |= DP_LINK_TRAIN_PAT_2;
+               break;
+       case DP_TRAINING_PATTERN_3:
+               drm_dbg_kms(&dev_priv->drm,
+                           "TPS3 not supported, using TPS2 instead\n");
+               *DP |= DP_LINK_TRAIN_PAT_2;
+               break;
        }
+
+       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+       intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
 static void intel_dp_enable_port(struct intel_dp *intel_dp,
@@ -3577,7 +3737,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-static void intel_enable_dp(struct intel_encoder *encoder,
+static void intel_enable_dp(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
@@ -3623,22 +3784,25 @@ static void intel_enable_dp(struct intel_encoder *encoder,
        }
 }
 
-static void g4x_enable_dp(struct intel_encoder *encoder,
+static void g4x_enable_dp(struct intel_atomic_state *state,
+                         struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
 {
-       intel_enable_dp(encoder, pipe_config, conn_state);
+       intel_enable_dp(state, encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
-static void vlv_enable_dp(struct intel_encoder *encoder,
+static void vlv_enable_dp(struct intel_atomic_state *state,
+                         struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
 {
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
-static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
 {
@@ -3758,16 +3922,18 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
 }
 
-static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
 {
        vlv_phy_pre_encoder_enable(encoder, pipe_config);
 
-       intel_enable_dp(encoder, pipe_config, conn_state);
+       intel_enable_dp(state, encoder, pipe_config, conn_state);
 }
 
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
 {
@@ -3776,19 +3942,21 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
        vlv_phy_pre_pll_enable(encoder, pipe_config);
 }
 
-static void chv_pre_enable_dp(struct intel_encoder *encoder,
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
 {
        chv_phy_pre_encoder_enable(encoder, pipe_config);
 
-       intel_enable_dp(encoder, pipe_config, conn_state);
+       intel_enable_dp(state, encoder, pipe_config, conn_state);
 
        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
 }
 
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
 {
@@ -3797,7 +3965,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
        chv_phy_pre_pll_enable(encoder, pipe_config);
 }
 
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+                                   struct intel_encoder *encoder,
                                    const struct intel_crtc_state *old_crtc_state,
                                    const struct drm_connector_state *old_conn_state)
 {
@@ -3881,7 +4050,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
        }
 }
 
-static u32 vlv_signal_levels(struct intel_dp *intel_dp)
+static void vlv_set_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        unsigned long demph_reg_value, preemph_reg_value,
@@ -3909,7 +4078,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -3928,7 +4097,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -3943,7 +4112,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -3954,20 +4123,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        default:
-               return 0;
+               return;
        }
 
        vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
                                 uniqtranscale_reg_value, 0);
-
-       return 0;
 }
 
-static u32 chv_signal_levels(struct intel_dp *intel_dp)
+static void chv_set_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        u32 deemph_reg_value, margin_reg_value;
@@ -3995,7 +4162,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
                        uniq_trans_scale = true;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -4013,7 +4180,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
                        margin_reg_value = 154;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4027,7 +4194,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
                        margin_reg_value = 154;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4037,21 +4204,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
                        margin_reg_value = 154;
                        break;
                default:
-                       return 0;
+                       return;
                }
                break;
        default:
-               return 0;
+               return;
        }
 
        chv_set_phy_signal_level(encoder, deemph_reg_value,
                                 margin_reg_value, uniq_trans_scale);
-
-       return 0;
 }
 
-static u32
-g4x_signal_levels(u8 train_set)
+static u32 g4x_signal_levels(u8 train_set)
 {
        u32 signal_levels = 0;
 
@@ -4088,12 +4252,31 @@ g4x_signal_levels(u8 train_set)
        return signal_levels;
 }
 
+static void
+g4x_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_set = intel_dp->train_set[0];
+       u32 signal_levels;
+
+       signal_levels = g4x_signal_levels(train_set);
+
+       drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+                   signal_levels);
+
+       intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
+       intel_dp->DP |= signal_levels;
+
+       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
 /* SNB CPU eDP voltage swing and pre-emphasis control */
-static u32
-snb_cpu_edp_signal_levels(u8 train_set)
+static u32 snb_cpu_edp_signal_levels(u8 train_set)
 {
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                       DP_TRAIN_PRE_EMPHASIS_MASK);
+
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4116,12 +4299,31 @@ snb_cpu_edp_signal_levels(u8 train_set)
        }
 }
 
+static void
+snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_set = intel_dp->train_set[0];
+       u32 signal_levels;
+
+       signal_levels = snb_cpu_edp_signal_levels(train_set);
+
+       drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+                   signal_levels);
+
+       intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+       intel_dp->DP |= signal_levels;
+
+       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
 /* IVB CPU eDP voltage swing and pre-emphasis control */
-static u32
-ivb_cpu_edp_signal_levels(u8 train_set)
+static u32 ivb_cpu_edp_signal_levels(u8 train_set)
 {
-       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                       DP_TRAIN_PRE_EMPHASIS_MASK);
+
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_400MV_0DB_IVB;
@@ -4147,97 +4349,61 @@ ivb_cpu_edp_signal_levels(u8 train_set)
        }
 }
 
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+static void
+ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum port port = intel_dig_port->base.port;
-       u32 signal_levels, mask = 0;
        u8 train_set = intel_dp->train_set[0];
+       u32 signal_levels;
 
-       if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-               signal_levels = bxt_signal_levels(intel_dp);
-       } else if (HAS_DDI(dev_priv)) {
-               signal_levels = ddi_signal_levels(intel_dp);
-               mask = DDI_BUF_EMP_MASK;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               signal_levels = chv_signal_levels(intel_dp);
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               signal_levels = vlv_signal_levels(intel_dp);
-       } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
-               signal_levels = ivb_cpu_edp_signal_levels(train_set);
-               mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-       } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
-               signal_levels = snb_cpu_edp_signal_levels(train_set);
-               mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
-       } else {
-               signal_levels = g4x_signal_levels(train_set);
-               mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
-       }
+       signal_levels = ivb_cpu_edp_signal_levels(train_set);
 
-       if (mask)
-               drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
-                           signal_levels);
+       drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+                   signal_levels);
 
-       drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
-                   train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
-                   train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
+       intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+       intel_dp->DP |= signal_levels;
+
+       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_set = intel_dp->train_set[0];
+
+       drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
+                   train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+                   train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
        drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
                    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
                    DP_TRAIN_PRE_EMPHASIS_SHIFT,
                    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
                    " (max)" : "");
 
-       intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
-
-       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
-       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+       intel_dp->set_signal_levels(intel_dp);
 }
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
                                       u8 dp_train_pat)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv =
-               to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
 
-       _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
+       if (dp_train_pat & train_pat_mask)
+               drm_dbg_kms(&dev_priv->drm,
+                           "Using DP training pattern TPS%d\n",
+                           dp_train_pat & train_pat_mask);
 
-       intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
-       intel_de_posting_read(dev_priv, intel_dp->output_reg);
+       intel_dp->set_link_train(intel_dp, dp_train_pat);
 }
 
 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum port port = intel_dig_port->base.port;
-       u32 val;
-
-       if (!HAS_DDI(dev_priv))
-               return;
-
-       val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
-       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
-       intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
-
-       /*
-        * Until TGL on PORT_A we can have only eDP in SST mode. There the only
-        * reason we need to set idle transmission mode is to work around a HW
-        * issue where we enable the pipe while not in idle link-training mode.
-        * In this case there is requirement to wait for a minimum number of
-        * idle patterns to be sent.
-        */
-       if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
-               return;
-
-       if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
-                                 DP_TP_STATUS_IDLE_DONE, 1))
-               drm_err(&dev_priv->drm,
-                       "Timed out waiting for DP idle patterns\n");
+       if (intel_dp->set_idle_link_train)
+               intel_dp->set_idle_link_train(intel_dp);
 }
 
 static void
@@ -4316,6 +4482,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
 static void
 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 dpcd_ext[6];
 
        /*
@@ -4331,20 +4498,22 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
                             &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
-               DRM_ERROR("DPCD failed read at extended capabilities\n");
+               drm_err(&i915->drm,
+                       "DPCD failed read at extended capabilities\n");
                return;
        }
 
        if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
-               DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+               drm_dbg_kms(&i915->drm,
+                           "DPCD extended DPCD rev less than base DPCD rev\n");
                return;
        }
 
        if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
                return;
 
-       DRM_DEBUG_KMS("Base DPCD: %*ph\n",
-                     (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+       drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
+                   (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
        memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
 }
@@ -4352,13 +4521,16 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
 bool
 intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
        if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
                             sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */
 
        intel_dp_extended_receiver_capabilities(intel_dp);
 
-       DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
+       drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
+                   intel_dp->dpcd);
 
        return intel_dp->dpcd[DP_DPCD_REV] != 0;
 }
@@ -4375,6 +4547,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 
 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
        /*
         * Clear the cached register set to avoid using stale values
         * for the sinks that do not support DSC.
@@ -4390,20 +4564,23 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
                if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
                                     intel_dp->dsc_dpcd,
                                     sizeof(intel_dp->dsc_dpcd)) < 0)
-                       DRM_ERROR("Failed to read DPCD register 0x%x\n",
-                                 DP_DSC_SUPPORT);
+                       drm_err(&i915->drm,
+                               "Failed to read DPCD register 0x%x\n",
+                               DP_DSC_SUPPORT);
 
-               DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
-                             (int)sizeof(intel_dp->dsc_dpcd),
-                             intel_dp->dsc_dpcd);
+               drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
+                           (int)sizeof(intel_dp->dsc_dpcd),
+                           intel_dp->dsc_dpcd);
 
                /* FEC is supported only on DP 1.4 */
                if (!intel_dp_is_edp(intel_dp) &&
                    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
                                      &intel_dp->fec_capable) < 0)
-                       DRM_ERROR("Failed to read FEC DPCD register\n");
+                       drm_err(&i915->drm,
+                               "Failed to read FEC DPCD register\n");
 
-               DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+               drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+                           intel_dp->fec_capable);
        }
 }
 
@@ -4577,14 +4754,16 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
 static void
 intel_dp_configure_mst(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_encoder *encoder =
                &dp_to_dig_port(intel_dp)->base;
        bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
 
-       DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
-                     encoder->base.base.id, encoder->base.name,
-                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
-                     yesno(i915_modparams.enable_dp_mst));
+       drm_dbg_kms(&i915->drm,
+                   "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+                   encoder->base.base.id, encoder->base.name,
+                   yesno(intel_dp->can_mst), yesno(sink_can_mst),
+                   yesno(i915_modparams.enable_dp_mst));
 
        if (!intel_dp->can_mst)
                return;
@@ -4630,158 +4809,92 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
        return false;
 }
 
-static void
-intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
-                      const struct intel_crtc_state *crtc_state,
-                      const struct drm_connector_state *conn_state)
+static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+                                    struct dp_sdp *sdp, size_t size)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct dp_sdp vsc_sdp = {};
+       size_t length = sizeof(struct dp_sdp);
+
+       if (size < length)
+               return -ENOSPC;
 
-       /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
-       vsc_sdp.sdp_header.HB0 = 0;
-       vsc_sdp.sdp_header.HB1 = 0x7;
+       memset(sdp, 0, size);
 
        /*
-        * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
-        * Colorimetry Format indication.
+        * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+        * VSC SDP Header Bytes
         */
-       vsc_sdp.sdp_header.HB2 = 0x5;
+       sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+       sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+       sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+       sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
 
        /*
-        * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
-        * Colorimetry Format indication (HB2 = 05h).
+        * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
+        * per DP 1.4a spec.
         */
-       vsc_sdp.sdp_header.HB3 = 0x13;
-
-       /* DP 1.4a spec, Table 2-120 */
-       switch (crtc_state->output_format) {
-       case INTEL_OUTPUT_FORMAT_YCBCR444:
-               vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
-               break;
-       case INTEL_OUTPUT_FORMAT_YCBCR420:
-               vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
-               break;
-       case INTEL_OUTPUT_FORMAT_RGB:
-       default:
-               /* RGB: DB16[7:4] = 0h */
-               break;
-       }
+       if (vsc->revision != 0x5)
+               goto out;
 
-       switch (conn_state->colorspace) {
-       case DRM_MODE_COLORIMETRY_BT709_YCC:
-               vsc_sdp.db[16] |= 0x1;
-               break;
-       case DRM_MODE_COLORIMETRY_XVYCC_601:
-               vsc_sdp.db[16] |= 0x2;
-               break;
-       case DRM_MODE_COLORIMETRY_XVYCC_709:
-               vsc_sdp.db[16] |= 0x3;
-               break;
-       case DRM_MODE_COLORIMETRY_SYCC_601:
-               vsc_sdp.db[16] |= 0x4;
-               break;
-       case DRM_MODE_COLORIMETRY_OPYCC_601:
-               vsc_sdp.db[16] |= 0x5;
-               break;
-       case DRM_MODE_COLORIMETRY_BT2020_CYCC:
-       case DRM_MODE_COLORIMETRY_BT2020_RGB:
-               vsc_sdp.db[16] |= 0x6;
-               break;
-       case DRM_MODE_COLORIMETRY_BT2020_YCC:
-               vsc_sdp.db[16] |= 0x7;
-               break;
-       case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
-       case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
-               vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
-               break;
-       default:
-               /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */
+       /* VSC SDP Payload for DB16 through DB18 */
+       /* Pixel Encoding and Colorimetry Formats  */
+       sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+       sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
 
-               /* RGB->YCBCR color conversion uses the BT.709 color space. */
-               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
-                       vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+       switch (vsc->bpc) {
+       case 6:
+               /* 6bpc: 0x0 */
                break;
-       }
-
-       /*
-        * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
-        * the following Component Bit Depth values are defined:
-        * 001b = 8bpc.
-        * 010b = 10bpc.
-        * 011b = 12bpc.
-        * 100b = 16bpc.
-        */
-       switch (crtc_state->pipe_bpp) {
-       case 24: /* 8bpc */
-               vsc_sdp.db[17] = 0x1;
+       case 8:
+               sdp->db[17] = 0x1; /* DB17[3:0] */
                break;
-       case 30: /* 10bpc */
-               vsc_sdp.db[17] = 0x2;
+       case 10:
+               sdp->db[17] = 0x2;
                break;
-       case 36: /* 12bpc */
-               vsc_sdp.db[17] = 0x3;
+       case 12:
+               sdp->db[17] = 0x3;
                break;
-       case 48: /* 16bpc */
-               vsc_sdp.db[17] = 0x4;
+       case 16:
+               sdp->db[17] = 0x4;
                break;
        default:
-               MISSING_CASE(crtc_state->pipe_bpp);
+               MISSING_CASE(vsc->bpc);
                break;
        }
+       /* Dynamic Range and Component Bit Depth */
+       if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+               sdp->db[17] |= 0x80;  /* DB17[7] */
 
-       /*
-        * Dynamic Range (Bit 7)
-        * 0 = VESA range, 1 = CTA range.
-        * all YCbCr are always limited range
-        */
-       vsc_sdp.db[17] |= 0x80;
-
-       /*
-        * Content Type (Bits 2:0)
-        * 000b = Not defined.
-        * 001b = Graphics.
-        * 010b = Photo.
-        * 011b = Video.
-        * 100b = Game
-        * All other values are RESERVED.
-        * Note: See CTA-861-G for the definition and expected
-        * processing by a stream sink for the above contect types.
-        */
-       vsc_sdp.db[18] = 0;
+       /* Content Type */
+       sdp->db[18] = vsc->content_type & 0x7;
 
-       intel_dig_port->write_infoframe(&intel_dig_port->base,
-                       crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
+out:
+       return length;
 }
 
-static void
-intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
-                                         const struct intel_crtc_state *crtc_state,
-                                         const struct drm_connector_state *conn_state)
+static ssize_t
+intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
+                                        struct dp_sdp *sdp,
+                                        size_t size)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct dp_sdp infoframe_sdp = {};
-       struct hdmi_drm_infoframe drm_infoframe = {};
+       size_t length = sizeof(struct dp_sdp);
        const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
        unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
        ssize_t len;
-       int ret;
 
-       ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
-       if (ret) {
-               DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
-               return;
-       }
+       if (size < length)
+               return -ENOSPC;
+
+       memset(sdp, 0, size);
 
-       len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+       len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
        if (len < 0) {
                DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
-               return;
+               return -ENOSPC;
        }
 
        if (len != infoframe_size) {
                DRM_DEBUG_KMS("wrong static hdr metadata size\n");
-               return;
+               return -ENOSPC;
        }
 
        /*
@@ -4790,34 +4903,37 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
         * Table 2-100 and Table 2-101
         */
 
-       /* Packet ID, 00h for non-Audio INFOFRAME */
-       infoframe_sdp.sdp_header.HB0 = 0;
+       /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
+       sdp->sdp_header.HB0 = 0;
        /*
         * Packet Type 80h + Non-audio INFOFRAME Type value
-        * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+        * HDMI_INFOFRAME_TYPE_DRM: 0x87
+        * - 80h + Non-audio INFOFRAME Type value
+        * - InfoFrame Type: 0x07
+        *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
         */
-       infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+       sdp->sdp_header.HB1 = drm_infoframe->type;
        /*
         * Least Significant Eight Bits of (Data Byte Count – 1)
-        * infoframe_size - 1,
+        * infoframe_size - 1
         */
-       infoframe_sdp.sdp_header.HB2 = 0x1D;
+       sdp->sdp_header.HB2 = 0x1D;
        /* INFOFRAME SDP Version Number */
-       infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+       sdp->sdp_header.HB3 = (0x13 << 2);
        /* CTA Header Byte 2 (INFOFRAME Version Number) */
-       infoframe_sdp.db[0] = drm_infoframe.version;
+       sdp->db[0] = drm_infoframe->version;
        /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
-       infoframe_sdp.db[1] = drm_infoframe.length;
+       sdp->db[1] = drm_infoframe->length;
        /*
         * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
         * HDMI_INFOFRAME_HEADER_SIZE
         */
-       BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
-       memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+       BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+       memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
               HDMI_DRM_INFOFRAME_SIZE);
 
        /*
-        * Size of DP infoframe sdp packet for HDR static metadata is consist of
+        * Size of DP infoframe sdp packet for HDR static metadata consists of
         * - DP SDP Header(struct dp_sdp_header): 4 bytes
         * - Two Data Blocks: 2 bytes
         *    CTA Header Byte2 (INFOFRAME Version Number)
@@ -4828,36 +4944,286 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
         * infoframe size. But GEN11+ has larger than that size, write_infoframe
         * will pad rest of the size.
         */
-       intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
-                                       HDMI_PACKET_TYPE_GAMUT_METADATA,
-                                       &infoframe_sdp,
-                                       sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+       return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
 }
 
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
-                        const struct intel_crtc_state *crtc_state,
-                        const struct drm_connector_state *conn_state)
+static void intel_write_dp_sdp(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type)
 {
-       if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct dp_sdp sdp = {};
+       ssize_t len;
+
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(type)) == 0)
                return;
 
-       intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+       switch (type) {
+       case DP_SDP_VSC:
+               len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
+                                           sizeof(sdp));
+               break;
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
+                                                              &sdp, sizeof(sdp));
+               break;
+       default:
+               MISSING_CASE(type);
+               return;
+       }
+
+       if (drm_WARN_ON(&dev_priv->drm, len < 0))
+               return;
+
+       intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
 }
 
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state,
-                                 const struct drm_connector_state *conn_state)
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state,
+                           struct drm_dp_vsc_sdp *vsc)
 {
-       if (!conn_state->hdr_output_metadata)
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct dp_sdp sdp = {};
+       ssize_t len;
+
+       len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
+
+       if (drm_WARN_ON(&dev_priv->drm, len < 0))
+               return;
+
+       intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+                                       &sdp, len);
+}
+
+void intel_dp_set_infoframes(struct intel_encoder *encoder,
+                            bool enable,
+                            const struct intel_crtc_state *crtc_state,
+                            const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+       u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+                        VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
+                        VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+       u32 val = intel_de_read(dev_priv, reg);
+
+       /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+       /* When PSR is enabled, this routine doesn't disable VSC DIP */
+       if (intel_psr_enabled(intel_dp))
+               val &= ~dip_enable;
+       else
+               val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
+
+       if (!enable) {
+               intel_de_write(dev_priv, reg, val);
+               intel_de_posting_read(dev_priv, reg);
+               return;
+       }
+
+       intel_de_write(dev_priv, reg, val);
+       intel_de_posting_read(dev_priv, reg);
+
+       /* When PSR is enabled, VSC SDP is handled by PSR routine */
+       if (!intel_psr_enabled(intel_dp))
+               intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+
+       intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
+static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
+                                  const void *buffer, size_t size)
+{
+       const struct dp_sdp *sdp = buffer;
+
+       if (size < sizeof(struct dp_sdp))
+               return -EINVAL;
+
+       memset(vsc, 0, size);
+
+       if (sdp->sdp_header.HB0 != 0)
+               return -EINVAL;
+
+       if (sdp->sdp_header.HB1 != DP_SDP_VSC)
+               return -EINVAL;
+
+       vsc->sdp_type = sdp->sdp_header.HB1;
+       vsc->revision = sdp->sdp_header.HB2;
+       vsc->length = sdp->sdp_header.HB3;
+
+       if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
+           (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
+               /*
+                * - HB2 = 0x2, HB3 = 0x8
+                *   VSC SDP supporting 3D stereo + PSR
+                * - HB2 = 0x4, HB3 = 0xe
+                *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
+                *   first scan line of the SU region (applies to eDP v1.4b
+                *   and higher).
+                */
+               return 0;
+       } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
+               /*
+                * - HB2 = 0x5, HB3 = 0x13
+                *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
+                *   Format.
+                */
+               vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
+               vsc->colorimetry = sdp->db[16] & 0xf;
+               vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
+
+               switch (sdp->db[17] & 0x7) {
+               case 0x0:
+                       vsc->bpc = 6;
+                       break;
+               case 0x1:
+                       vsc->bpc = 8;
+                       break;
+               case 0x2:
+                       vsc->bpc = 10;
+                       break;
+               case 0x3:
+                       vsc->bpc = 12;
+                       break;
+               case 0x4:
+                       vsc->bpc = 16;
+                       break;
+               default:
+                       MISSING_CASE(sdp->db[17] & 0x7);
+                       return -EINVAL;
+               }
+
+               vsc->content_type = sdp->db[18] & 0x7;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
+                                          const void *buffer, size_t size)
+{
+       int ret;
+
+       const struct dp_sdp *sdp = buffer;
+
+       if (size < sizeof(struct dp_sdp))
+               return -EINVAL;
+
+       if (sdp->sdp_header.HB0 != 0)
+               return -EINVAL;
+
+       if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
+               return -EINVAL;
+
+       /*
+        * Least Significant Eight Bits of (Data Byte Count – 1)
+        * 1Dh (i.e., Data Byte Count = 30 bytes).
+        */
+       if (sdp->sdp_header.HB2 != 0x1D)
+               return -EINVAL;
+
+       /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
+       if ((sdp->sdp_header.HB3 & 0x3) != 0)
+               return -EINVAL;
+
+       /* INFOFRAME SDP Version Number */
+       if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
+               return -EINVAL;
+
+       /* CTA Header Byte 2 (INFOFRAME Version Number) */
+       if (sdp->db[0] != 1)
+               return -EINVAL;
+
+       /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+       if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
+               return -EINVAL;
+
+       ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
+                                            HDMI_DRM_INFOFRAME_SIZE);
+
+       return ret;
+}
+
+static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_dp_vsc_sdp *vsc)
+{
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       unsigned int type = DP_SDP_VSC;
+       struct dp_sdp sdp = {};
+       int ret;
+
+       /* When PSR is enabled, VSC SDP is handled by PSR routine */
+       if (intel_psr_enabled(intel_dp))
+               return;
+
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(type)) == 0)
                return;
 
-       intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
-                                                 crtc_state,
-                                                 conn_state);
+       intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+
+       ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
+
+       if (ret)
+               drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+}
+
+static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
+                                                    struct intel_crtc_state *crtc_state,
+                                                    struct hdmi_drm_infoframe *drm_infoframe)
+{
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
+       struct dp_sdp sdp = {};
+       int ret;
+
+       if ((crtc_state->infoframes.enable &
+           intel_hdmi_infoframe_enable(type)) == 0)
+               return;
+
+       intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+                                      sizeof(sdp));
+
+       ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
+                                                        sizeof(sdp));
+
+       if (ret)
+               drm_dbg_kms(&dev_priv->drm,
+                           "Failed to unpack DP HDR Metadata Infoframe SDP\n");
+}
+
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+                      struct intel_crtc_state *crtc_state,
+                      unsigned int type)
+{
+       switch (type) {
+       case DP_SDP_VSC:
+               intel_read_dp_vsc_sdp(encoder, crtc_state,
+                                     &crtc_state->infoframes.vsc);
+               break;
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
+                                                        &crtc_state->infoframes.drm.drm);
+               break;
+       default:
+               MISSING_CASE(type);
+               break;
+       }
 }
 
 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int status = 0;
        int test_link_rate;
        u8 test_lane_count, test_link_bw;
@@ -4869,7 +5235,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
                                   &test_lane_count);
 
        if (status <= 0) {
-               DRM_DEBUG_KMS("Lane count read failed\n");
+               drm_dbg_kms(&i915->drm, "Lane count read failed\n");
                return DP_TEST_NAK;
        }
        test_lane_count &= DP_MAX_LANE_COUNT_MASK;
@@ -4877,7 +5243,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
                                   &test_link_bw);
        if (status <= 0) {
-               DRM_DEBUG_KMS("Link Rate read failed\n");
+               drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
                return DP_TEST_NAK;
        }
        test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
@@ -4895,6 +5261,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 
 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 test_pattern;
        u8 test_misc;
        __be16 h_width, v_height;
@@ -4904,7 +5271,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
                                   &test_pattern);
        if (status <= 0) {
-               DRM_DEBUG_KMS("Test pattern read failed\n");
+               drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
                return DP_TEST_NAK;
        }
        if (test_pattern != DP_COLOR_RAMP)
@@ -4913,21 +5280,21 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
        status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
                                  &h_width, 2);
        if (status <= 0) {
-               DRM_DEBUG_KMS("H Width read failed\n");
+               drm_dbg_kms(&i915->drm, "H Width read failed\n");
                return DP_TEST_NAK;
        }
 
        status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
                                  &v_height, 2);
        if (status <= 0) {
-               DRM_DEBUG_KMS("V Height read failed\n");
+               drm_dbg_kms(&i915->drm, "V Height read failed\n");
                return DP_TEST_NAK;
        }
 
        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
                                   &test_misc);
        if (status <= 0) {
-               DRM_DEBUG_KMS("TEST MISC read failed\n");
+               drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
                return DP_TEST_NAK;
        }
        if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
@@ -4956,6 +5323,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
 
 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 test_result = DP_TEST_ACK;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_connector *connector = &intel_connector->base;
@@ -4972,9 +5340,10 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
                 */
                if (intel_dp->aux.i2c_nack_count > 0 ||
                        intel_dp->aux.i2c_defer_count > 0)
-                       DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
-                                     intel_dp->aux.i2c_nack_count,
-                                     intel_dp->aux.i2c_defer_count);
+                       drm_dbg_kms(&i915->drm,
+                                   "EDID read had %d NACKs, %d DEFERs\n",
+                                   intel_dp->aux.i2c_nack_count,
+                                   intel_dp->aux.i2c_defer_count);
                intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
        } else {
                struct edid *block = intel_connector->detect_edid;
@@ -4986,7 +5355,8 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
 
                if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
                                       block->checksum) <= 0)
-                       DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+                       drm_dbg_kms(&i915->drm,
+                                   "Failed to write EDID checksum\n");
 
                test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
                intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
@@ -4998,43 +5368,217 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
        return test_result;
 }
 
+static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
+{
+       struct drm_dp_phy_test_params *data =
+               &intel_dp->compliance.test_data.phytest;
+
+       if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+               DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
+               return DP_TEST_NAK;
+       }
+
+       /*
+        * link_mst is set to false to avoid executing mst related code
+        * during compliance testing.
+        */
+       intel_dp->link_mst = false;
+
+       return DP_TEST_ACK;
+}
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv =
+                       to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_dp_phy_test_params *data =
+                       &intel_dp->compliance.test_data.phytest;
+       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       enum pipe pipe = crtc->pipe;
+       u32 pattern_val;
+
+       switch (data->phy_pattern) {
+       case DP_PHY_TEST_PATTERN_NONE:
+               DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+               break;
+       case DP_PHY_TEST_PATTERN_D10_2:
+               DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+                              DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+               break;
+       case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+               DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+                              DDI_DP_COMP_CTL_ENABLE |
+                              DDI_DP_COMP_CTL_SCRAMBLED_0);
+               break;
+       case DP_PHY_TEST_PATTERN_PRBS7:
+               DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+                              DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+               break;
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+               /*
+                * FIXME: Ideally pattern should come from DPCD 0x250. As
+                * current firmware of DPR-100 could not set it, so hardcoding
+                * now for complaince test.
+                */
+               DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+               pattern_val = 0x3e0f83e0;
+               intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+               pattern_val = 0x0f83e0f8;
+               intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+               pattern_val = 0x0000f83e;
+               intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+                              DDI_DP_COMP_CTL_ENABLE |
+                              DDI_DP_COMP_CTL_CUSTOM80);
+               break;
+       case DP_PHY_TEST_PATTERN_CP2520:
+               /*
+                * FIXME: Ideally pattern should come from DPCD 0x24A. As
+                * current firmware of DPR-100 could not set it, so hardcoding
+                * now for complaince test.
+                */
+               DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
+               pattern_val = 0xFB;
+               intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+                              DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+                              pattern_val);
+               break;
+       default:
+               WARN(1, "Invalid Phy Test Pattern\n");
+       }
+}
+
+static void
+intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       enum pipe pipe = crtc->pipe;
+       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+                                                TRANS_DDI_FUNC_CTL(pipe));
+       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+       trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+                                     TGL_TRANS_DDI_PORT_MASK);
+       trans_conf_value &= ~PIPECONF_ENABLE;
+       dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+
+       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+                      trans_ddi_func_ctl_value);
+       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+}
+
+static void
+intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum port port = intel_dig_port->base.port;
+       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       enum pipe pipe = crtc->pipe;
+       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+                                                TRANS_DDI_FUNC_CTL(pipe));
+       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+       trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+                                   TGL_TRANS_DDI_SELECT_PORT(port);
+       trans_conf_value |= PIPECONF_ENABLE;
+       dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+
+       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+                      trans_ddi_func_ctl_value);
+}
+
+void intel_dp_process_phy_request(struct intel_dp *intel_dp)
+{
+       struct drm_dp_phy_test_params *data =
+               &intel_dp->compliance.test_data.phytest;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+
+       if (!intel_dp_get_link_status(intel_dp, link_status)) {
+               DRM_DEBUG_KMS("failed to get link status\n");
+               return;
+       }
+
+       /* retrieve vswing & pre-emphasis setting */
+       intel_dp_get_adjust_train(intel_dp, link_status);
+
+       intel_dp_autotest_phy_ddi_disable(intel_dp);
+
+       intel_dp_set_signal_levels(intel_dp);
+
+       intel_dp_phy_pattern_update(intel_dp);
+
+       intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
+
+       drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+                                   link_status[DP_DPCD_REV]);
+}
+
 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
 {
-       u8 test_result = DP_TEST_NAK;
+       u8 test_result;
+
+       test_result = intel_dp_prepare_phytest(intel_dp);
+       if (test_result != DP_TEST_ACK)
+               DRM_ERROR("Phy test preparation failed\n");
+
+       intel_dp_process_phy_request(intel_dp);
+
        return test_result;
 }
 
 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 response = DP_TEST_NAK;
        u8 request = 0;
        int status;
 
        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
        if (status <= 0) {
-               DRM_DEBUG_KMS("Could not read test request from sink\n");
+               drm_dbg_kms(&i915->drm,
+                           "Could not read test request from sink\n");
                goto update_status;
        }
 
        switch (request) {
        case DP_TEST_LINK_TRAINING:
-               DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+               drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
                response = intel_dp_autotest_link_training(intel_dp);
                break;
        case DP_TEST_LINK_VIDEO_PATTERN:
-               DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+               drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
                response = intel_dp_autotest_video_pattern(intel_dp);
                break;
        case DP_TEST_LINK_EDID_READ:
-               DRM_DEBUG_KMS("EDID test requested\n");
+               drm_dbg_kms(&i915->drm, "EDID test requested\n");
                response = intel_dp_autotest_edid(intel_dp);
                break;
        case DP_TEST_LINK_PHY_TEST_PATTERN:
-               DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+               drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
                response = intel_dp_autotest_phy_pattern(intel_dp);
                break;
        default:
-               DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+               drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
+                           request);
                break;
        }
 
@@ -5044,64 +5588,59 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
 update_status:
        status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
        if (status <= 0)
-               DRM_DEBUG_KMS("Could not write test response to sink\n");
+               drm_dbg_kms(&i915->drm,
+                           "Could not write test response to sink\n");
 }
 
 static int
 intel_dp_check_mst_status(struct intel_dp *intel_dp)
 {
-       bool bret;
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       bool need_retrain = false;
 
-       if (intel_dp->is_mst) {
-               u8 esi[DP_DPRX_ESI_LEN] = { 0 };
-               int ret = 0;
+       if (!intel_dp->is_mst)
+               return -EINVAL;
+
+       WARN_ON_ONCE(intel_dp->active_mst_links < 0);
+
+       for (;;) {
+               u8 esi[DP_DPRX_ESI_LEN] = {};
+               bool bret, handled;
                int retry;
-               bool handled;
 
-               WARN_ON_ONCE(intel_dp->active_mst_links < 0);
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
-go_again:
-               if (bret == true) {
-
-                       /* check link status - esi[10] = 0x200c */
-                       if (intel_dp->active_mst_links > 0 &&
-                           !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
-                               DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
-                               intel_dp_start_link_train(intel_dp);
-                               intel_dp_stop_link_train(intel_dp);
-                       }
+               if (!bret) {
+                       drm_dbg_kms(&i915->drm,
+                                   "failed to get ESI - device may have failed\n");
+                       return -EINVAL;
+               }
 
-                       DRM_DEBUG_KMS("got esi %3ph\n", esi);
-                       ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
-
-                       if (handled) {
-                               for (retry = 0; retry < 3; retry++) {
-                                       int wret;
-                                       wret = drm_dp_dpcd_write(&intel_dp->aux,
-                                                                DP_SINK_COUNT_ESI+1,
-                                                                &esi[1], 3);
-                                       if (wret == 3) {
-                                               break;
-                                       }
-                               }
+               /* check link status - esi[10] = 0x200c */
+               if (intel_dp->active_mst_links > 0 && !need_retrain &&
+                   !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+                       drm_dbg_kms(&i915->drm,
+                                   "channel EQ not ok, retraining\n");
+                       need_retrain = true;
+               }
 
-                               bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
-                               if (bret == true) {
-                                       DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
-                                       goto go_again;
-                               }
-                       } else
-                               ret = 0;
+               drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
 
-                       return ret;
-               } else {
-                       DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
-                       intel_dp->is_mst = false;
-                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
-                                                       intel_dp->is_mst);
+               drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+               if (!handled)
+                       break;
+
+               for (retry = 0; retry < 3; retry++) {
+                       int wret;
+
+                       wret = drm_dp_dpcd_write(&intel_dp->aux,
+                                                DP_SINK_COUNT_ESI+1,
+                                                &esi[1], 3);
+                       if (wret == 3)
+                               break;
                }
        }
-       return -EINVAL;
+
+       return need_retrain;
 }
 
 static bool
@@ -5138,20 +5677,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
+static bool intel_dp_has_connector(struct intel_dp *intel_dp,
+                                  const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_encoder *encoder;
+       enum pipe pipe;
+
+       if (!conn_state->best_encoder)
+               return false;
+
+       /* SST */
+       encoder = &dp_to_dig_port(intel_dp)->base;
+       if (conn_state->best_encoder == &encoder->base)
+               return true;
+
+       /* MST */
+       for_each_pipe(i915, pipe) {
+               encoder = &intel_dp->mst_encoders[pipe]->base;
+               if (conn_state->best_encoder == &encoder->base)
+                       return true;
+       }
+
+       return false;
+}
+
+static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
+                                     struct drm_modeset_acquire_ctx *ctx,
+                                     u32 *crtc_mask)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct drm_connector_list_iter conn_iter;
+       struct intel_connector *connector;
+       int ret = 0;
+
+       *crtc_mask = 0;
+
+       if (!intel_dp_needs_link_retrain(intel_dp))
+               return 0;
+
+       drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               struct drm_connector_state *conn_state =
+                       connector->base.state;
+               struct intel_crtc_state *crtc_state;
+               struct intel_crtc *crtc;
+
+               if (!intel_dp_has_connector(intel_dp, conn_state))
+                       continue;
+
+               crtc = to_intel_crtc(conn_state->crtc);
+               if (!crtc)
+                       continue;
+
+               ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+               if (ret)
+                       break;
+
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+
+               drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+
+               if (!crtc_state->hw.active)
+                       continue;
+
+               if (conn_state->commit &&
+                   !try_wait_for_completion(&conn_state->commit->hw_done))
+                       continue;
+
+               *crtc_mask |= drm_crtc_mask(&crtc->base);
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       if (!intel_dp_needs_link_retrain(intel_dp))
+               *crtc_mask = 0;
+
+       return ret;
+}
+
+static bool intel_dp_is_connected(struct intel_dp *intel_dp)
+{
+       struct intel_connector *connector = intel_dp->attached_connector;
+
+       return connector->base.status == connector_status_connected ||
+               intel_dp->is_mst;
+}
+
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct intel_connector *connector = intel_dp->attached_connector;
-       struct drm_connector_state *conn_state;
-       struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;
+       u32 crtc_mask;
        int ret;
 
-       /* FIXME handle the MST connectors as well */
-
-       if (!connector || connector->base.status != connector_status_connected)
+       if (!intel_dp_is_connected(intel_dp))
                return 0;
 
        ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -5159,46 +5780,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
        if (ret)
                return ret;
 
-       conn_state = connector->base.state;
-
-       crtc = to_intel_crtc(conn_state->crtc);
-       if (!crtc)
-               return 0;
-
-       ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+       ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
        if (ret)
                return ret;
 
-       crtc_state = to_intel_crtc_state(crtc->base.state);
-
-       drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
-
-       if (!crtc_state->hw.active)
+       if (crtc_mask == 0)
                return 0;
 
-       if (conn_state->commit &&
-           !try_wait_for_completion(&conn_state->commit->hw_done))
-               return 0;
+       drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
+                   encoder->base.base.id, encoder->base.name);
 
-       if (!intel_dp_needs_link_retrain(intel_dp))
-               return 0;
+       for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+               const struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
 
-       /* Suppress underruns caused by re-training */
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-       if (crtc_state->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv,
-                                                     intel_crtc_pch_transcoder(crtc), false);
+               /* Suppress underruns caused by re-training */
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+               if (crtc_state->has_pch_encoder)
+                       intel_set_pch_fifo_underrun_reporting(dev_priv,
+                                                             intel_crtc_pch_transcoder(crtc), false);
+       }
 
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 
-       /* Keep underrun reporting disabled until things are stable */
-       intel_wait_for_vblank(dev_priv, crtc->pipe);
+       for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+               const struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+
+               /* Keep underrun reporting disabled until things are stable */
+               intel_wait_for_vblank(dev_priv, crtc->pipe);
 
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-       if (crtc_state->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv,
-                                                     intel_crtc_pch_transcoder(crtc), true);
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+               if (crtc_state->has_pch_encoder)
+                       intel_set_pch_fifo_underrun_reporting(dev_priv,
+                                                             intel_crtc_pch_transcoder(crtc), true);
+       }
 
        return 0;
 }
@@ -5217,14 +5834,13 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
  */
 static enum intel_hotplug_state
 intel_dp_hotplug(struct intel_encoder *encoder,
-                struct intel_connector *connector,
-                bool irq_received)
+                struct intel_connector *connector)
 {
        struct drm_modeset_acquire_ctx ctx;
        enum intel_hotplug_state state;
        int ret;
 
-       state = intel_encoder_hotplug(encoder, connector, irq_received);
+       state = intel_encoder_hotplug(encoder, connector);
 
        drm_modeset_acquire_init(&ctx, 0);
 
@@ -5248,7 +5864,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
         * Keeping it consistent with intel_ddi_hotplug() and
         * intel_hdmi_hotplug().
         */
-       if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+       if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
                state = INTEL_HOTPLUG_RETRY;
 
        return state;
@@ -5256,6 +5872,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
 
 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;
 
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5274,7 +5891,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
 
        if (val & DP_SINK_SPECIFIC_IRQ)
-               DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+               drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
 }
 
 /*
@@ -5341,6 +5958,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 static enum drm_connector_status
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
        u8 *dpcd = intel_dp->dpcd;
        u8 type;
@@ -5388,7 +6006,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
        }
 
        /* Anything else is out of spec, warn and ignore */
-       DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+       drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
        return connector_status_disconnected;
 }
 
@@ -5401,64 +6019,7 @@ edp_detect(struct intel_dp *intel_dp)
 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 bit;
-
-       switch (encoder->hpd_pin) {
-       case HPD_PORT_B:
-               bit = SDE_PORTB_HOTPLUG;
-               break;
-       case HPD_PORT_C:
-               bit = SDE_PORTC_HOTPLUG;
-               break;
-       case HPD_PORT_D:
-               bit = SDE_PORTD_HOTPLUG;
-               break;
-       default:
-               MISSING_CASE(encoder->hpd_pin);
-               return false;
-       }
-
-       return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool cpt_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 bit;
-
-       switch (encoder->hpd_pin) {
-       case HPD_PORT_B:
-               bit = SDE_PORTB_HOTPLUG_CPT;
-               break;
-       case HPD_PORT_C:
-               bit = SDE_PORTC_HOTPLUG_CPT;
-               break;
-       case HPD_PORT_D:
-               bit = SDE_PORTD_HOTPLUG_CPT;
-               break;
-       default:
-               MISSING_CASE(encoder->hpd_pin);
-               return false;
-       }
-
-       return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool spt_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 bit;
-
-       switch (encoder->hpd_pin) {
-       case HPD_PORT_A:
-               bit = SDE_PORTA_HOTPLUG_SPT;
-               break;
-       case HPD_PORT_E:
-               bit = SDE_PORTE_HOTPLUG_SPT;
-               break;
-       default:
-               return cpt_digital_port_connected(encoder);
-       }
+       u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
 
        return intel_de_read(dev_priv, SDEISR) & bit;
 }
@@ -5512,89 +6073,9 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
 
-       if (encoder->hpd_pin == HPD_PORT_A)
-               return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
-       else
-               return ibx_digital_port_connected(encoder);
-}
-
-static bool snb_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (encoder->hpd_pin == HPD_PORT_A)
-               return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
-       else
-               return cpt_digital_port_connected(encoder);
-}
-
-static bool ivb_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (encoder->hpd_pin == HPD_PORT_A)
-               return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
-       else
-               return cpt_digital_port_connected(encoder);
-}
-
-static bool bdw_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (encoder->hpd_pin == HPD_PORT_A)
-               return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
-       else
-               return cpt_digital_port_connected(encoder);
-}
-
-static bool bxt_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 bit;
-
-       switch (encoder->hpd_pin) {
-       case HPD_PORT_A:
-               bit = BXT_DE_PORT_HP_DDIA;
-               break;
-       case HPD_PORT_B:
-               bit = BXT_DE_PORT_HP_DDIB;
-               break;
-       case HPD_PORT_C:
-               bit = BXT_DE_PORT_HP_DDIC;
-               break;
-       default:
-               MISSING_CASE(encoder->hpd_pin);
-               return false;
-       }
-
-       return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
-}
-
-static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
-                                     enum phy phy)
-{
-       if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
-               return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
-
-       return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
-}
-
-static bool icp_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
-       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-       if (intel_phy_is_combo(dev_priv, phy))
-               return intel_combo_phy_connected(dev_priv, phy);
-       else if (intel_phy_is_tc(dev_priv, phy))
-               return intel_tc_port_connected(dig_port);
-       else
-               MISSING_CASE(encoder->hpd_pin);
-
-       return false;
+       return intel_de_read(dev_priv, DEISR) & bit;
 }
 
 /*
@@ -5608,44 +6089,15 @@ static bool icp_digital_port_connected(struct intel_encoder *encoder)
  *
  * Return %true if port is connected, %false otherwise.
  */
-static bool __intel_digital_port_connected(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (HAS_GMCH(dev_priv)) {
-               if (IS_GM45(dev_priv))
-                       return gm45_digital_port_connected(encoder);
-               else
-                       return g4x_digital_port_connected(encoder);
-       }
-
-       if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-               return icp_digital_port_connected(encoder);
-       else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
-               return spt_digital_port_connected(encoder);
-       else if (IS_GEN9_LP(dev_priv))
-               return bxt_digital_port_connected(encoder);
-       else if (IS_GEN(dev_priv, 8))
-               return bdw_digital_port_connected(encoder);
-       else if (IS_GEN(dev_priv, 7))
-               return ivb_digital_port_connected(encoder);
-       else if (IS_GEN(dev_priv, 6))
-               return snb_digital_port_connected(encoder);
-       else if (IS_GEN(dev_priv, 5))
-               return ilk_digital_port_connected(encoder);
-
-       MISSING_CASE(INTEL_GEN(dev_priv));
-       return false;
-}
-
 bool intel_digital_port_connected(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        bool is_connected = false;
        intel_wakeref_t wakeref;
 
        with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
-               is_connected = __intel_digital_port_connected(encoder);
+               is_connected = dig_port->connected(encoder);
 
        return is_connected;
 }
@@ -5860,6 +6312,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 static int
 intel_dp_connector_register(struct drm_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
        int ret;
 
@@ -5867,10 +6320,8 @@ intel_dp_connector_register(struct drm_connector *connector)
        if (ret)
                return ret;
 
-       intel_connector_debugfs_add(connector);
-
-       DRM_DEBUG_KMS("registering %s bus for %s\n",
-                     intel_dp->aux.name, connector->kdev->kobj.name);
+       drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+                   intel_dp->aux.name, connector->kdev->kobj.name);
 
        intel_dp->aux.dev = connector->kdev;
        ret = drm_dp_aux_register(&intel_dp->aux);
@@ -5956,6 +6407,7 @@ static
 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
                                u8 *an)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
        static const struct drm_dp_aux_msg msg = {
                .request = DP_AUX_NATIVE_WRITE,
@@ -5970,8 +6422,9 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
                                     an, DRM_HDCP_AN_LEN);
        if (dpcd_ret != DRM_HDCP_AN_LEN) {
-               DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
-                             dpcd_ret);
+               drm_dbg_kms(&i915->drm,
+                           "Failed to write An over DP/AUX (%zd)\n",
+                           dpcd_ret);
                return dpcd_ret >= 0 ? -EIO : dpcd_ret;
        }
 
@@ -5987,17 +6440,19 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
                                rxbuf, sizeof(rxbuf),
                                DP_AUX_CH_CTL_AUX_AKSV_SELECT);
        if (ret < 0) {
-               DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Write Aksv over DP/AUX failed (%d)\n", ret);
                return ret;
        } else if (ret == 0) {
-               DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
+               drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
                return -EIO;
        }
 
        reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
        if (reply != DP_AUX_NATIVE_REPLY_ACK) {
-               DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
-                             reply);
+               drm_dbg_kms(&i915->drm,
+                           "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+                           reply);
                return -EIO;
        }
        return 0;
@@ -6006,11 +6461,14 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
                                   u8 *bksv)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
+
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
                               DRM_HDCP_KSV_LEN);
        if (ret != DRM_HDCP_KSV_LEN) {
-               DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read Bksv from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -6019,7 +6477,9 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
                                      u8 *bstatus)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
+
        /*
         * For some reason the HDMI and DP HDCP specs call this register
         * definition by different names. In the HDMI spec, it's called BSTATUS,
@@ -6028,7 +6488,8 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
                               bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret != DRM_HDCP_BSTATUS_LEN) {
-               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -6038,12 +6499,14 @@ static
 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
                             u8 *bcaps)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
 
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
                               bcaps, 1);
        if (ret != 1) {
-               DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read bcaps from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
 
@@ -6069,11 +6532,14 @@ static
 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
                                u8 *ri_prime)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
+
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
                               ri_prime, DRM_HDCP_RI_LEN);
        if (ret != DRM_HDCP_RI_LEN) {
-               DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+                           ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -6083,12 +6549,15 @@ static
 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
                                 bool *ksv_ready)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
        u8 bstatus;
+
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
-               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
        *ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -6099,6 +6568,7 @@ static
 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                int num_downstream, u8 *ksv_fifo)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
        int i;
 
@@ -6110,8 +6580,9 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                       ksv_fifo + i * DRM_HDCP_KSV_LEN,
                                       len);
                if (ret != len) {
-                       DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
-                                     i, ret);
+                       drm_dbg_kms(&i915->drm,
+                                   "Read ksv[%d] from DP/AUX failed (%zd)\n",
+                                   i, ret);
                        return ret >= 0 ? -EIO : ret;
                }
        }
@@ -6122,6 +6593,7 @@ static
 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
                                    int i, u32 *part)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
 
        if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -6131,7 +6603,8 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
                               DP_AUX_HDCP_V_PRIME(i), part,
                               DRM_HDCP_V_PRIME_PART_LEN);
        if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
-               DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
                return ret >= 0 ? -EIO : ret;
        }
        return 0;
@@ -6148,13 +6621,15 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 static
 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
        u8 bstatus;
 
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
-               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
                return false;
        }
 
@@ -6225,17 +6700,19 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
          0, 0 },
 };
 
-static inline
-int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
-                                 u8 *rx_status)
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+                             u8 *rx_status)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        ssize_t ret;
 
        ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
                               DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
                               HDCP_2_2_DP_RXSTATUS_LEN);
        if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
-               DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
                return ret >= 0 ? -EIO : ret;
        }
 
@@ -6279,6 +6756,7 @@ static ssize_t
 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
                            const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        struct intel_dp *dp = &intel_dig_port->dp;
        struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        u8 msg_id = hdcp2_msg_data->msg_id;
@@ -6310,8 +6788,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
        }
 
        if (ret)
-               DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
-                             hdcp2_msg_data->msg_id, ret, timeout);
+               drm_dbg_kms(&i915->drm,
+                           "msg_id %d, ret %d, timeout(mSec): %d\n",
+                           hdcp2_msg_data->msg_id, ret, timeout);
 
        return ret;
 }
@@ -6397,6 +6876,7 @@ static
 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
                            u8 msg_id, void *buf, size_t size)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_recv, len;
@@ -6430,7 +6910,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
                ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
                                       (void *)byte, len);
                if (ret < 0) {
-                       DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+                       drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+                                   msg_id, ret);
                        return ret;
                }
 
@@ -6721,7 +7202,11 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
        if (ret)
                return ret;
 
-       if (INTEL_GEN(dev_priv) < 11)
+       /*
+        * We don't enable port sync on BDW due to missing w/as and
+        * due to not having adjusted the modeset sequence appropriately.
+        */
+       if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
        if (!intel_connector_needs_modeset(state, conn))
@@ -6760,28 +7245,45 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .destroy = intel_dp_encoder_destroy,
 };
 
+static bool intel_edp_have_power(struct intel_dp *intel_dp)
+{
+       intel_wakeref_t wakeref;
+       bool have_power = false;
+
+       with_pps_lock(intel_dp, wakeref) {
+               have_power = edp_have_panel_power(intel_dp) &&
+                                                 edp_have_panel_vdd(intel_dp);
+       }
+
+       return have_power;
+}
+
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-       if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
+       if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+           (long_hpd || !intel_edp_have_power(intel_dp))) {
                /*
-                * vdd off can generate a long pulse on eDP which
+                * vdd off can generate a long/short pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
-                * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
+                * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
                 */
-               DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
-                             intel_dig_port->base.base.base.id,
-                             intel_dig_port->base.base.name);
+               drm_dbg_kms(&i915->drm,
+                           "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
+                           long_hpd ? "long" : "short",
+                           intel_dig_port->base.base.base.id,
+                           intel_dig_port->base.base.name);
                return IRQ_HANDLED;
        }
 
-       DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
-                     intel_dig_port->base.base.base.id,
-                     intel_dig_port->base.base.name,
-                     long_hpd ? "long" : "short");
+       drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+                   intel_dig_port->base.base.base.id,
+                   intel_dig_port->base.base.name,
+                   long_hpd ? "long" : "short");
 
        if (long_hpd) {
                intel_dp->reset_link_params = true;
@@ -6789,18 +7291,25 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        }
 
        if (intel_dp->is_mst) {
-               if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+               switch (intel_dp_check_mst_status(intel_dp)) {
+               case -EINVAL:
                        /*
                         * If we were in MST mode, and device is not
                         * there, get out of MST mode
                         */
-                       DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-                                     intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+                       drm_dbg_kms(&i915->drm,
+                                   "MST device may have disappeared %d vs %d\n",
+                                   intel_dp->is_mst,
+                                   intel_dp->mst_mgr.mst_state);
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
 
                        return IRQ_NONE;
+               case 1:
+                       return IRQ_NONE;
+               default:
+                       break;
                }
        }
 
@@ -7831,6 +8340,23 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                intel_encoder->post_disable = g4x_post_disable_dp;
        }
 
+       if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+           (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+               intel_dig_port->dp.set_link_train = cpt_set_link_train;
+       else
+               intel_dig_port->dp.set_link_train = g4x_set_link_train;
+
+       if (IS_CHERRYVIEW(dev_priv))
+               intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+       else if (IS_VALLEYVIEW(dev_priv))
+               intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+       else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+               intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+       else if (IS_GEN(dev_priv, 6) && port == PORT_A)
+               intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+       else
+               intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;
        intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
@@ -7851,6 +8377,18 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 
        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
+       if (HAS_GMCH(dev_priv)) {
+               if (IS_GM45(dev_priv))
+                       intel_dig_port->connected = gm45_digital_port_connected;
+               else
+                       intel_dig_port->connected = g4x_digital_port_connected;
+       } else {
+               if (port == PORT_A)
+                       intel_dig_port->connected = ilk_digital_port_connected;
+               else
+                       intel_dig_port->connected = ibx_digital_port_connected;
+       }
+
        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);
 
index 0c7be8ed1423add57f03a1b034d94d15995f8d27..1702959ca079e21bdadf1a5c8370837a49463eea 100644 (file)
@@ -16,6 +16,7 @@ struct drm_connector_state;
 struct drm_encoder;
 struct drm_i915_private;
 struct drm_modeset_acquire_ctx;
+struct drm_dp_vsc_sdp;
 struct intel_connector;
 struct intel_crtc_state;
 struct intel_digital_port;
@@ -108,13 +109,21 @@ int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state);
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
-                        const struct intel_crtc_state *crtc_state,
-                        const struct drm_connector_state *conn_state);
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state,
-                                 const struct drm_connector_state *conn_state);
+                                 const struct drm_connector_state *conn_state,
+                                 struct drm_dp_vsc_sdp *vsc);
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state,
+                           struct drm_dp_vsc_sdp *vsc);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+                            const struct intel_crtc_state *crtc_state,
+                            const struct drm_connector_state *conn_state);
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+                      struct intel_crtc_state *crtc_state,
+                      unsigned int type);
 bool intel_digital_port_connected(struct intel_encoder *encoder);
+void intel_dp_process_phy_request(struct intel_dp *intel_dp);
 
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
index dbfa6895795b2e067a08ccae65b355bceab21422..0722540d64ad1a2c0400fbb4c825fe9d56530cc7 100644 (file)
@@ -27,6 +27,7 @@
 
 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 reg_val = 0;
 
        /* Early return when display use other mechanism to enable backlight. */
@@ -35,8 +36,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
                              &reg_val) < 0) {
-               DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
-                             DP_EDP_DISPLAY_CONTROL_REGISTER);
+               drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+                           DP_EDP_DISPLAY_CONTROL_REGISTER);
                return;
        }
        if (enable)
@@ -46,8 +47,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
                               reg_val) != 1) {
-               DRM_DEBUG_KMS("Failed to %s aux backlight\n",
-                             enable ? "enable" : "disable");
+               drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
+                           enable ? "enable" : "disable");
        }
 }
 
@@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 read_val[2] = { 0x0 };
        u8 mode_reg;
        u16 level = 0;
@@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
                              &mode_reg) != 1) {
-               DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
-                             DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+               drm_dbg_kms(&i915->drm,
+                           "Failed to read the DPCD register 0x%x\n",
+                           DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
                return 0;
        }
 
@@ -80,8 +83,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
                             &read_val, sizeof(read_val)) < 0) {
-               DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
-                             DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+               drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+                           DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
                return 0;
        }
        level = read_val[0];
@@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 vals[2] = { 0x0 };
 
        vals[0] = level;
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
        }
        if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
                              vals, sizeof(vals)) < 0) {
-               DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to write aux backlight level\n");
                return;
        }
 }
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 
        freq = dev_priv->vbt.backlight.pwm_freq_hz;
        if (!freq) {
-               DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Use panel default backlight frequency\n");
                return false;
        }
 
@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
        fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
 
        if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
-               DRM_DEBUG_KMS("Actual frequency out of range\n");
+               drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
                return false;
        }
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
-               DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Failed to write aux backlight freq\n");
                return false;
        }
        return true;
@@ -163,13 +170,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_panel *panel = &connector->panel;
        u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                        DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
-               DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
-                             DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+               drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+                           DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
                return;
        }
 
@@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
                if (drm_dp_dpcd_writeb(&intel_dp->aux,
                                       DP_EDP_PWMGEN_BIT_COUNT,
                                       panel->backlight.pwmgen_bit_count) < 0)
-                       DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+                       drm_dbg_kms(&i915->drm,
+                                   "Failed to write aux pwmgen bit count\n");
 
                break;
 
@@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
        if (new_dpcd_buf != dpcd_buf) {
                if (drm_dp_dpcd_writeb(&intel_dp->aux,
                        DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
-                       DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+                       drm_dbg_kms(&i915->drm,
+                                   "Failed to write aux backlight mode\n");
                }
        }
 
@@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
         * minimum value will applied automatically. So no need to check that.
         */
        freq = i915->vbt.backlight.pwm_freq_hz;
-       DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+       drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n",
+                   freq);
        if (!freq) {
-               DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+               drm_dbg_kms(&i915->drm,
+                           "Use panel default backlight frequency\n");
                return max_backlight;
        }
 
@@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
         */
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
-               DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to read pwmgen bit count cap min\n");
                return max_backlight;
        }
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
-               DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to read pwmgen bit count cap max\n");
                return max_backlight;
        }
        pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
@@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
        fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
        fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
        if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
-               DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+               drm_dbg_kms(&i915->drm,
+                           "VBT defined backlight frequency out of range\n");
                return max_backlight;
        }
 
@@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
                        break;
        }
 
-       DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+       drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn);
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
-               DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+               drm_dbg_kms(&i915->drm,
+                           "Failed to write aux pwmgen bit count\n");
                return max_backlight;
        }
        panel->backlight.pwmgen_bit_count = pn;
@@ -312,6 +328,7 @@ static bool
 intel_dp_aux_display_control_capable(struct intel_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        /* Check the eDP Display control capabilities registers to determine if
         * the panel can support backlight control over the aux channel
@@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
        if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
            (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
            !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
-               DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+               drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
                return true;
        }
        return false;
@@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 {
        struct intel_panel *panel = &intel_connector->panel;
        struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
-       struct drm_device *dev = intel_connector->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        if (i915_modparams.enable_dpcd_backlight == 0 ||
            !intel_dp_aux_display_control_capable(intel_connector))
@@ -340,18 +356,18 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
         * There are a lot of machines that don't advertise the backlight
         * control interface to use properly in their VBIOS, :\
         */
-       if (dev_priv->vbt.backlight.type !=
+       if (i915->vbt.backlight.type !=
            INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
            i915_modparams.enable_dpcd_backlight != 1 &&
            !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
                              DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
-               DRM_DEV_INFO(dev->dev,
-                            "Panel advertises DPCD backlight support, but "
-                            "VBT disagrees. If your backlight controls "
-                            "don't work try booting with "
-                            "i915.enable_dpcd_backlight=1. If your machine "
-                            "needs this, please file a _new_ bug report on "
-                            "drm/i915, see " FDO_BUG_URL " for details.\n");
+               drm_info(&i915->drm,
+                        "Panel advertises DPCD backlight support, but "
+                        "VBT disagrees. If your backlight controls "
+                        "don't work try booting with "
+                        "i915.enable_dpcd_backlight=1. If your machine "
+                        "needs this, please file a _new_ bug report on "
+                        "drm/i915, see " FDO_BUG_URL " for details.\n");
                return -ENODEV;
        }
 
index a7defb37ab007f3132044fb3c1a528a9fc5103cb..e4f1843170b7aeee6f8cc5decc30eafac135bb66 100644 (file)
@@ -34,9 +34,8 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
                      link_status[3], link_status[4], link_status[5]);
 }
 
-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
-                      const u8 link_status[DP_LINK_STATUS_SIZE])
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+                              const u8 link_status[DP_LINK_STATUS_SIZE])
 {
        u8 v = 0;
        u8 p = 0;
@@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
                /* Update training set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status);
+               intel_dp_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        drm_err(&i915->drm,
                                "failed to update link training\n");
@@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                }
 
                /* Update training set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status);
+               intel_dp_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        drm_err(&i915->drm,
                                "failed to update link training\n");
index 174566adcc92b9069ac33cabeeb32e50c73f48b7..01f1dabbb060dca470c4521b3eec999577c150db 100644 (file)
@@ -6,8 +6,12 @@
 #ifndef __INTEL_DP_LINK_TRAINING_H__
 #define __INTEL_DP_LINK_TRAINING_H__
 
+#include <drm/drm_dp_helper.h>
+
 struct intel_dp;
 
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+                              const u8 link_status[DP_LINK_STATUS_SIZE]);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 
index 44f3fd251ca1f1ccc4bd9600c7ba50a26b0efd67..d18b406f2a7d2384bf3ed11e909f4d0bf8315c02 100644 (file)
@@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = &intel_mst->primary->dp;
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
-       void *port = connector->port;
        bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
                                           DP_DPCD_QUIRK_CONSTANT_N);
        int bpp, slots = -EINVAL;
@@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                                                       false);
 
                slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
-                                                     port, crtc_state->pbn, 0);
+                                                     connector->port,
+                                                     crtc_state->pbn, 0);
                if (slots == -EDEADLK)
                        return slots;
                if (slots >= 0)
@@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
        }
 
        if (slots < 0) {
-               DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+               drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+                           slots);
                return slots;
        }
 
@@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
        return 0;
 }
 
-/*
- * Iterate over all connectors and return the smallest transcoder in the MST
- * stream
- */
-static enum transcoder
-intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
-                                 struct intel_dp *mst_port)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_digital_connector_state *conn_state;
-       struct intel_connector *connector;
-       enum pipe ret = I915_MAX_PIPES;
-       int i;
-
-       if (INTEL_GEN(dev_priv) < 12)
-               return INVALID_TRANSCODER;
-
-       for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
-               struct intel_crtc_state *crtc_state;
-               struct intel_crtc *crtc;
-
-               if (connector->mst_port != mst_port || !conn_state->base.crtc)
-                       continue;
-
-               crtc = to_intel_crtc(conn_state->base.crtc);
-               crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-               if (!crtc_state->uapi.active)
-                       continue;
-
-               /*
-                * Using crtc->pipe because crtc_state->cpu_transcoder is
-                * computed, so others CRTCs could have non-computed
-                * cpu_transcoder
-                */
-               if (crtc->pipe < ret)
-                       ret = crtc->pipe;
-       }
-
-       if (ret == I915_MAX_PIPES)
-               return INVALID_TRANSCODER;
-
-       /* Simple cast works because TGL don't have a eDP transcoder */
-       return (enum transcoder)ret;
-}
-
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
                                       struct intel_crtc_state *pipe_config,
                                       struct drm_connector_state *conn_state)
 {
-       struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
        struct intel_dp *intel_dp = &intel_mst->primary->dp;
@@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
                to_intel_digital_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode =
                &pipe_config->hw.adjusted_mode;
-       void *port = connector->port;
        struct link_config_limits limits;
        int ret;
 
@@ -158,8 +113,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
        pipe_config->has_pch_encoder = false;
 
        if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-               pipe_config->has_audio =
-                       drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+               pipe_config->has_audio = connector->port->has_audio;
        else
                pipe_config->has_audio =
                        intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@ -201,7 +155,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-       pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+       return 0;
+}
+
+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+                            struct intel_dp *mst_port)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       const struct intel_digital_connector_state *conn_state;
+       struct intel_connector *connector;
+       u8 transcoders = 0;
+       int i;
+
+       if (INTEL_GEN(dev_priv) < 12)
+               return 0;
+
+       for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+               const struct intel_crtc_state *crtc_state;
+               struct intel_crtc *crtc;
+
+               if (connector->mst_port != mst_port || !conn_state->base.crtc)
+                       continue;
+
+               crtc = to_intel_crtc(conn_state->base.crtc);
+               crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+               if (!crtc_state->hw.active)
+                       continue;
+
+               transcoders |= BIT(crtc_state->cpu_transcoder);
+       }
+
+       return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+                                           struct intel_crtc_state *crtc_state,
+                                           struct drm_connector_state *conn_state)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+       struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+       struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+       /* lowest numbered transcoder will be designated master */
+       crtc_state->mst_master_transcoder =
+               ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
 
        return 0;
 }
@@ -313,7 +316,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
        return ret;
 }
 
-static void intel_mst_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
 {
@@ -322,22 +326,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        int ret;
 
-       DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+       drm_dbg_kms(&i915->drm, "active links %d\n",
+                   intel_dp->active_mst_links);
 
        drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
        if (ret) {
-               DRM_DEBUG_KMS("failed to update payload %d\n", ret);
+               drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
        }
        if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder,
                                          old_crtc_state, old_conn_state);
 }
 
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
                                      const struct intel_crtc_state *old_crtc_state,
                                      const struct drm_connector_state *old_conn_state)
 {
@@ -371,7 +378,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 
        if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
                                  DP_TP_STATUS_ACT_SENT, 1))
-               DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+               drm_err(&dev_priv->drm,
+                       "Timed out waiting for ACT sent when disabling\n");
        drm_dp_check_act_status(&intel_dp->mst_mgr);
 
        drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -402,13 +410,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 
        intel_mst->connector = NULL;
        if (last_mst_stream)
-               intel_dig_port->base.post_disable(&intel_dig_port->base,
+               intel_dig_port->base.post_disable(state, &intel_dig_port->base,
                                                  old_crtc_state, NULL);
 
-       DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+       drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+                   intel_dp->active_mst_links);
 }
 
-static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+                                       struct intel_encoder *encoder,
                                        const struct intel_crtc_state *pipe_config,
                                        const struct drm_connector_state *conn_state)
 {
@@ -417,11 +427,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
 
        if (intel_dp->active_mst_links == 0)
-               intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+               intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
                                                    pipe_config, NULL);
 }
 
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+                                   struct intel_encoder *encoder,
                                    const struct intel_crtc_state *pipe_config,
                                    const struct drm_connector_state *conn_state)
 {
@@ -445,7 +456,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
                    INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
                    !intel_dp_mst_is_master_trans(pipe_config));
 
-       DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+       drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+                   intel_dp->active_mst_links);
 
        if (first_mst_stream)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -453,7 +465,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
 
        if (first_mst_stream)
-               intel_dig_port->base.pre_enable(&intel_dig_port->base,
+               intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
                                                pipe_config, NULL);
 
        ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -461,7 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
                                       pipe_config->pbn,
                                       pipe_config->dp_m_n.tu);
        if (!ret)
-               DRM_ERROR("failed to allocate vcpi\n");
+               drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
 
        intel_dp->active_mst_links++;
        temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
@@ -477,14 +489,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
         * here for the following ones.
         */
        if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
-               intel_ddi_enable_pipe_clock(pipe_config);
+               intel_ddi_enable_pipe_clock(encoder, pipe_config);
 
        intel_ddi_set_dp_msa(pipe_config, conn_state);
 
        intel_dp_set_m_n(pipe_config, M1_N1);
 }
 
-static void intel_mst_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
@@ -495,19 +508,23 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 
        drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
 
-       intel_enable_pipe(pipe_config);
-
-       intel_crtc_vblank_on(pipe_config);
+       intel_ddi_enable_transcoder_func(encoder, pipe_config);
 
-       DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+       drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+                   intel_dp->active_mst_links);
 
        if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
                                  DP_TP_STATUS_ACT_SENT, 1))
-               DRM_ERROR("Timed out waiting for ACT sent\n");
+               drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
 
        drm_dp_check_act_status(&intel_dp->mst_mgr);
 
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+       intel_enable_pipe(pipe_config);
+
+       intel_crtc_vblank_on(pipe_config);
+
        if (pipe_config->has_audio)
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
@@ -786,6 +803,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
        intel_encoder->pipe_mask = ~0;
 
        intel_encoder->compute_config = intel_dp_mst_compute_config;
+       intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
        intel_encoder->disable = intel_mst_disable_dp;
        intel_encoder->post_disable = intel_mst_post_disable_dp;
        intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
index 2d47f1f756a2ec294c2c939b20305d255931f6a3..b45185b80bec57909a958f366381509a7fd3bf37 100644 (file)
@@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
 {
        struct intel_atomic_state *state = to_intel_atomic_state(s);
 
-       WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+       drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 
        if (!state->dpll_set) {
                state->dpll_set = true;
@@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
 
-       if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+       if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
                return NULL;
 
        crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
@@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
        dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
                    ref_clock / 0x8000;
 
-       if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+       if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
                return 0;
 
        return dco_freq / (p0 * p1 * p2 * 5);
@@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
 
        clk_div->p1 = best_clock.p1;
        clk_div->p2 = best_clock.p2;
-       WARN_ON(best_clock.m1 != 2);
+       drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
        clk_div->n = best_clock.n;
        clk_div->m2_int = best_clock.m2 >> 22;
        clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
index d7a6bf2277df4e56599d0b1420a1a593c0559e63..29fec6a92d1732dc38b6f00f15cbbd2a5b6e8dc7 100644 (file)
@@ -34,7 +34,7 @@
 #define DSB_BYTE_EN_SHIFT              20
 #define DSB_REG_VALUE_MASK             0xfffff
 
-static inline bool is_dsb_busy(struct intel_dsb *dsb)
+static bool is_dsb_busy(struct intel_dsb *dsb)
 {
        struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
        return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 }
 
-static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
 {
        struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
        return true;
 }
 
-static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
 {
        struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
index a2a937109a5ad113e18b8363c1dc5d9807114716..afa4e6817e8c7be4fe0c96305ddee4a60005f722 100644 (file)
@@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
 
 int intel_dsi_get_modes(struct drm_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *mode;
 
-       DRM_DEBUG_KMS("\n");
+       drm_dbg_kms(&i915->drm, "\n");
 
        if (!intel_connector->panel.fixed_mode) {
-               DRM_DEBUG_KMS("no fixed mode\n");
+               drm_dbg_kms(&i915->drm, "no fixed mode\n");
                return 0;
        }
 
        mode = drm_mode_duplicate(connector->dev,
                                  intel_connector->panel.fixed_mode);
        if (!mode) {
-               DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+               drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
                return 0;
        }
 
@@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
        const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
-       DRM_DEBUG_KMS("\n");
+       drm_dbg_kms(&dev_priv->drm, "\n");
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
index 574dcfec9577e81389ec28c4b9942a41f40f6b90..eed037ec0b297f0ec250a6e38dca10d636e6e57d 100644 (file)
@@ -121,7 +121,7 @@ struct i2c_adapter_lookup {
 #define  ICL_GPIO_DDPA_CTRLCLK_2       8
 #define  ICL_GPIO_DDPA_CTRLDATA_2      9
 
-static inline enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(u8 port)
 {
        return port ? PORT_C : PORT_A;
 }
@@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
 
 static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 {
-       struct drm_device *drm_dev = intel_dsi->base.base.dev;
-       struct device *dev = &drm_dev->pdev->dev;
+       struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
        struct i2c_adapter *adapter;
        struct i2c_msg msg;
        int ret;
@@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 
        adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
        if (!adapter) {
-               DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+               drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
                goto err_bus;
        }
 
@@ -489,9 +488,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 
        ret = i2c_transfer(adapter, &msg, 1);
        if (ret < 0)
-               DRM_DEV_ERROR(dev,
-                             "Failed to xfer payload of size (%u) to reg (%u)\n",
-                             payload_size, reg_offset);
+               drm_err(&i915->drm,
+                       "Failed to xfer payload of size (%u) to reg (%u)\n",
+                       payload_size, reg_offset);
 
        kfree(payload_data);
 err_alloc:
index 341d5ce8b062adf318a77599a3678dec752285de..5cd09034519b3475d014f92fc3a9733e25b5ec5f 100644 (file)
@@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
        pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_disable_dvo(struct intel_encoder *encoder,
+static void intel_disable_dvo(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
@@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
        intel_de_read(dev_priv, dvo_reg);
 }
 
-static void intel_enable_dvo(struct intel_encoder *encoder,
+static void intel_enable_dvo(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *pipe_config,
                             const struct drm_connector_state *conn_state)
 {
@@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
        return 0;
 }
 
-static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
index c125ca9ab9b3a93fa4c27da86f29140e4618e9e2..1c26673acb2dd8bd603c427a1eddae38b86761e6 100644 (file)
@@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
        /* Wait for compressing bit to clear */
        if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
                                    FBC_STAT_COMPRESSING, 10)) {
-               DRM_DEBUG_KMS("FBC idle timed out\n");
+               drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
                return;
        }
 }
@@ -485,7 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
-               DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+               drm_info_once(&dev_priv->drm,
+                             "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
        }
 
        fbc->threshold = ret;
@@ -520,8 +521,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
                               dev_priv->dsm.start + compressed_llb->start);
        }
 
-       DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
-                     fbc->compressed_fb.size, fbc->threshold);
+       drm_dbg_kms(&dev_priv->drm,
+                   "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+                   fbc->compressed_fb.size, fbc->threshold);
 
        return 0;
 
@@ -530,7 +532,7 @@ err_fb:
        i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 err_llb:
        if (drm_mm_initialized(&dev_priv->mm.stolen))
-               pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+               drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
 }
 
@@ -538,6 +540,9 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc *fbc = &dev_priv->fbc;
 
+       if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
+               return;
+
        if (!drm_mm_node_allocated(&fbc->compressed_fb))
                return;
 
@@ -562,7 +567,7 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 }
 
 static bool stride_is_valid(struct drm_i915_private *dev_priv,
-                           unsigned int stride)
+                           u64 modifier, unsigned int stride)
 {
        /* This should have been caught earlier. */
        if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
@@ -578,6 +583,11 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
        if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
                return false;
 
+       /* Display WA #1105: skl,bxt,kbl,cfl,glk */
+       if (IS_GEN(dev_priv, 9) &&
+           modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
+               return false;
+
        if (stride > 16384)
                return false;
 
@@ -605,6 +615,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
        }
 }
 
+static bool rotation_is_valid(struct drm_i915_private *dev_priv,
+                             u32 pixel_format, unsigned int rotation)
+{
+       if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+           drm_rotation_90_or_270(rotation))
+               return false;
+       else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+                rotation != DRM_MODE_ROTATE_0)
+               return false;
+
+       return true;
+}
+
 /*
  * For some reason, the hardware tracking starts looking at whatever we
  * programmed as the display plane base address register. It does not look at
@@ -639,6 +662,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
+static bool tiling_is_valid(struct drm_i915_private *dev_priv,
+                           uint64_t modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+               if (INTEL_GEN(dev_priv) >= 9)
+                       return true;
+               return false;
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         const struct intel_crtc_state *crtc_state,
                                         const struct intel_plane_state *plane_state)
@@ -672,6 +711,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 
        cache->fb.format = fb->format;
        cache->fb.stride = fb->pitches[0];
+       cache->fb.modifier = fb->modifier;
 
        drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
                    !plane_state->vma->fence);
@@ -745,30 +785,40 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       /* The use of a CPU fence is mandatory in order to detect writes
-        * by the CPU to the scanout and trigger updates to the FBC.
+       /* The use of a CPU fence is one of two ways to detect writes by the
+        * CPU to the scanout and trigger updates to the FBC.
+        *
+        * The other method is by software tracking (see
+        * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
+        * the current compressed buffer and recompress it.
         *
         * Note that is possible for a tiled surface to be unmappable (and
-        * so have no fence associated with it) due to aperture constaints
+        * so have no fence associated with it) due to aperture constraints
         * at the time of pinning.
         *
         * FIXME with 90/270 degree rotation we should use the fence on
         * the normal GTT view (the rotated view doesn't even have a
         * fence). Would need changes to the FBC fence Y offset as well.
-        * For now this will effecively disable FBC with 90/270 degree
+        * For now this will effectively disable FBC with 90/270 degree
         * rotation.
         */
-       if (cache->fence_id < 0) {
+       if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
-       if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
-           cache->plane.rotation != DRM_MODE_ROTATE_0) {
+
+       if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+                              cache->plane.rotation)) {
                fbc->no_fbc_reason = "rotation unsupported";
                return false;
        }
 
-       if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+       if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+               fbc->no_fbc_reason = "tiling unsupported";
+               return false;
+       }
+
+       if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
                fbc->no_fbc_reason = "framebuffer stride not supported";
                return false;
        }
@@ -947,7 +997,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
        drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
        drm_WARN_ON(&dev_priv->drm, fbc->active);
 
-       DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+       drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
+                   pipe_name(crtc->pipe));
 
        __intel_fbc_cleanup_cfb(dev_priv);
 
@@ -1175,7 +1226,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
        else
                cache->gen9_wa_cfb_stride = 0;
 
-       DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+       drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+                   pipe_name(crtc->pipe));
        fbc->no_fbc_reason = "FBC enabled but not active yet\n";
 
        fbc->crtc = crtc;
@@ -1237,7 +1289,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
        if (fbc->underrun_detected || !fbc->crtc)
                goto out;
 
-       DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+       drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
        fbc->underrun_detected = true;
 
        intel_fbc_deactivate(dev_priv, "FIFO underrun");
@@ -1263,7 +1315,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
                return ret;
 
        if (dev_priv->fbc.underrun_detected) {
-               DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Re-allowing FBC after fifo underrun\n");
                dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
        }
 
@@ -1334,7 +1387,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
        /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
        if (intel_vtd_active() &&
            (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
-               DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+               drm_info(&dev_priv->drm,
+                        "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
                return true;
        }
 
@@ -1362,8 +1416,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
                mkwrite_device_info(dev_priv)->display.has_fbc = false;
 
        i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
-       DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
-                     i915_modparams.enable_fbc);
+       drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
+                   i915_modparams.enable_fbc);
 
        if (!HAS_FBC(dev_priv)) {
                fbc->no_fbc_reason = "unsupported by this chipset";
index 3bc804212a99037c6b264ed9e3896deae94261b3..bd39eb6a21b8bd6bd515da15b14b92abaabb76a7 100644 (file)
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        if (IS_ERR(obj))
                obj = i915_gem_object_create_shmem(dev_priv, size);
        if (IS_ERR(obj)) {
-               DRM_ERROR("failed to allocate framebuffer\n");
+               drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
                return PTR_ERR(obj);
        }
 
@@ -183,21 +183,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
        if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
             sizes->fb_height > intel_fb->base.height)) {
-               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
-                             " releasing it\n",
-                             intel_fb->base.width, intel_fb->base.height,
-                             sizes->fb_width, sizes->fb_height);
+               drm_dbg_kms(&dev_priv->drm,
+                           "BIOS fb too small (%dx%d), we require (%dx%d),"
+                           " releasing it\n",
+                           intel_fb->base.width, intel_fb->base.height,
+                           sizes->fb_width, sizes->fb_height);
                drm_framebuffer_put(&intel_fb->base);
                intel_fb = ifbdev->fb = NULL;
        }
        if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
-               DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
                        return ret;
                intel_fb = ifbdev->fb;
        } else {
-               DRM_DEBUG_KMS("re-using BIOS fb\n");
+               drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
                prealloc = true;
                sizes->fb_width = intel_fb->base.width;
                sizes->fb_height = intel_fb->base.height;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
-               DRM_ERROR("Failed to allocate fb_info\n");
+               drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
                ret = PTR_ERR(info);
                goto out_unpin;
        }
@@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        vaddr = i915_vma_pin_iomap(vma);
        if (IS_ERR(vaddr)) {
-               DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+               drm_err(&dev_priv->drm,
+                       "Failed to remap framebuffer into virtual memory\n");
                ret = PTR_ERR(vaddr);
                goto out_unpin;
        }
@@ -258,9 +261,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
-                     ifbdev->fb->base.width, ifbdev->fb->base.height,
-                     i915_ggtt_offset(vma));
+       drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+                   ifbdev->fb->base.width, ifbdev->fb->base.height,
+                   i915_ggtt_offset(vma));
        ifbdev->vma = vma;
        ifbdev->vma_flags = flags;
 
@@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 static bool intel_fbdev_init_bios(struct drm_device *dev,
                                 struct intel_fbdev *ifbdev)
 {
+       struct drm_i915_private *i915 = to_i915(dev);
        struct intel_framebuffer *fb = NULL;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
@@ -321,21 +325,24 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                intel_crtc = to_intel_crtc(crtc);
 
                if (!crtc->state->active || !obj) {
-                       DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
-                                     pipe_name(intel_crtc->pipe));
+                       drm_dbg_kms(&i915->drm,
+                                   "pipe %c not active or no fb, skipping\n",
+                                   pipe_name(intel_crtc->pipe));
                        continue;
                }
 
                if (obj->base.size > max_size) {
-                       DRM_DEBUG_KMS("found possible fb from plane %c\n",
-                                     pipe_name(intel_crtc->pipe));
+                       drm_dbg_kms(&i915->drm,
+                                   "found possible fb from plane %c\n",
+                                   pipe_name(intel_crtc->pipe));
                        fb = to_intel_framebuffer(crtc->primary->state->fb);
                        max_size = obj->base.size;
                }
        }
 
        if (!fb) {
-               DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+               drm_dbg_kms(&i915->drm,
+                           "no active fbs found, not using BIOS config\n");
                goto out;
        }
 
@@ -346,13 +353,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                intel_crtc = to_intel_crtc(crtc);
 
                if (!crtc->state->active) {
-                       DRM_DEBUG_KMS("pipe %c not active, skipping\n",
-                                     pipe_name(intel_crtc->pipe));
+                       drm_dbg_kms(&i915->drm,
+                                   "pipe %c not active, skipping\n",
+                                   pipe_name(intel_crtc->pipe));
                        continue;
                }
 
-               DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
-                             pipe_name(intel_crtc->pipe));
+               drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
+                           pipe_name(intel_crtc->pipe));
 
                /*
                 * See if the plane fb we found above will fit on this
@@ -362,9 +370,10 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
                cur_size = cur_size * fb->base.format->cpp[0];
                if (fb->base.pitches[0] < cur_size) {
-                       DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
-                                     pipe_name(intel_crtc->pipe),
-                                     cur_size, fb->base.pitches[0]);
+                       drm_dbg_kms(&i915->drm,
+                                   "fb not wide enough for plane %c (%d vs %d)\n",
+                                   pipe_name(intel_crtc->pipe),
+                                   cur_size, fb->base.pitches[0]);
                        fb = NULL;
                        break;
                }
@@ -372,28 +381,32 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
                cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
                cur_size *= fb->base.pitches[0];
-               DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
-                             pipe_name(intel_crtc->pipe),
-                             crtc->state->adjusted_mode.crtc_hdisplay,
-                             crtc->state->adjusted_mode.crtc_vdisplay,
-                             fb->base.format->cpp[0] * 8,
-                             cur_size);
+               drm_dbg_kms(&i915->drm,
+                           "pipe %c area: %dx%d, bpp: %d, size: %d\n",
+                           pipe_name(intel_crtc->pipe),
+                           crtc->state->adjusted_mode.crtc_hdisplay,
+                           crtc->state->adjusted_mode.crtc_vdisplay,
+                           fb->base.format->cpp[0] * 8,
+                           cur_size);
 
                if (cur_size > max_size) {
-                       DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
-                                     pipe_name(intel_crtc->pipe),
-                                     cur_size, max_size);
+                       drm_dbg_kms(&i915->drm,
+                                   "fb not big enough for plane %c (%d vs %d)\n",
+                                   pipe_name(intel_crtc->pipe),
+                                   cur_size, max_size);
                        fb = NULL;
                        break;
                }
 
-               DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
-                             pipe_name(intel_crtc->pipe),
-                             max_size, cur_size);
+               drm_dbg_kms(&i915->drm,
+                           "fb big enough for plane %c (%d >= %d)\n",
+                           pipe_name(intel_crtc->pipe),
+                           max_size, cur_size);
        }
 
        if (!fb) {
-               DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+               drm_dbg_kms(&i915->drm,
+                           "BIOS fb not suitable for all pipes, not using\n");
                goto out;
        }
 
@@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
        }
 
 
-       DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+       drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
        return true;
 
 out:
@@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
  * processing, fbdev will perform a full connector reprobe if a hotplug event
  * was received while HPD was suspended.
  */
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
 {
+       struct intel_fbdev *ifbdev = i915->fbdev;
        bool send_hpd = false;
 
        mutex_lock(&ifbdev->hpd_lock);
@@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
        mutex_unlock(&ifbdev->hpd_lock);
 
        if (send_hpd) {
-               DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+               drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
                drm_fb_helper_hotplug_event(&ifbdev->helper);
        }
 }
@@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
        drm_fb_helper_set_suspend(&ifbdev->helper, state);
        console_unlock();
 
-       intel_fbdev_hpd_set_suspend(ifbdev, state);
+       intel_fbdev_hpd_set_suspend(dev_priv, state);
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
index 6cb02c912accf079529cba521718c7c8492ba76c..2979ed2588eb93ad9aa6dcc02c1c433bb9c7cab5 100644 (file)
@@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
                     BITS_PER_TYPE(atomic_t));
 
        if (old) {
-               WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+               drm_WARN_ON(old->obj->base.dev,
+                           !(atomic_read(&old->bits) & frontbuffer_bits));
                atomic_andnot(frontbuffer_bits, &old->bits);
        }
 
        if (new) {
-               WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+               drm_WARN_ON(new->obj->base.dev,
+                           atomic_read(&new->bits) & frontbuffer_bits);
                atomic_or(frontbuffer_bits, &new->bits);
        }
 }
index a0cc894c386814d4a776085457aa78a47ba94589..212d4ee682059e7dec696cd8cecb7c8ddbad2698 100644 (file)
@@ -64,13 +64,14 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state)
                        return;
        }
 
-       WARN(1, "Global state not read locked\n");
+       drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
 }
 
 struct intel_global_state *
 intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
                                  struct intel_global_obj *obj)
 {
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
        int index, num_objs, i;
        size_t size;
        struct __intel_global_objs_state *arr;
@@ -106,8 +107,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
 
        state->num_global_objs = num_objs;
 
-       DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
-                        obj, obj_state, state);
+       drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+                      obj, obj_state, state);
 
        return obj_state;
 }
@@ -147,7 +148,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
 
        for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
                                            new_obj_state, i) {
-               WARN_ON(obj->state != old_obj_state);
+               drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
 
                /*
                 * If the new state wasn't modified (and properly
index 1fd3a5a6296b136f2cab3bf776a4b8ec1e4b3e20..a8d119b6b45c8eab2862c569dbbe57df5c638f1f 100644 (file)
@@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static inline
-unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
 {
        return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
               GMBUS_BYTE_COUNT_MAX;
index ee0f27ea2810d03e8bf85a07ea62e9dcccca2e8a..2cbc4619b4ce6d3175f05572fafcc4b491f2b7f3 100644 (file)
@@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
        return capable;
 }
 
-static inline
-bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
-                      enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+                             enum transcoder cpu_transcoder, enum port port)
 {
        return intel_de_read(dev_priv,
                             HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
               HDCP_STATUS_ENC;
 }
 
-static inline
-bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
-                       enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+                              enum transcoder cpu_transcoder, enum port port)
 {
        return intel_de_read(dev_priv,
                             HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
@@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
        return ret;
 }
 
-static inline
-struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
 {
        return container_of(hdcp, struct intel_connector, hdcp);
 }
@@ -1391,6 +1388,7 @@ static
 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 {
        struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_rep_stream_manage stream_manage;
@@ -1431,7 +1429,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
        hdcp->seq_num_m++;
 
        if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
-               DRM_DEBUG_KMS("seq_num_m roll over.\n");
+               drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
                return -1;
        }
 
@@ -1855,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = {
        .unbind = i915_hdcp_component_unbind,
 };
 
-static inline
-enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
 {
        switch (port) {
        case PORT_A:
@@ -1868,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
        }
 }
 
-static inline
-enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
 {
        switch (cpu_transcoder) {
        case TRANSCODER_A ... TRANSCODER_D:
@@ -1879,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
        }
 }
 
-static inline int initialize_hdcp_port_data(struct intel_connector *connector,
-                                           const struct intel_hdcp_shim *shim)
+static int initialize_hdcp_port_data(struct intel_connector *connector,
+                                    const struct intel_hdcp_shim *shim)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
@@ -2075,7 +2071,8 @@ int intel_hdcp_disable(struct intel_connector *connector)
        return ret;
 }
 
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state)
 {
index 7c12ad609b1fe47f1ae35408797b54c91235a0d8..86bbaec120cc30569510387017a95a63720e323a 100644 (file)
@@ -11,6 +11,7 @@
 struct drm_connector;
 struct drm_connector_state;
 struct drm_i915_private;
+struct intel_atomic_state;
 struct intel_connector;
 struct intel_crtc_state;
 struct intel_encoder;
@@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector,
 int intel_hdcp_enable(struct intel_connector *connector,
                      enum transcoder cpu_transcoder, u8 content_type);
 int intel_hdcp_disable(struct intel_connector *connector);
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
index 821411b93dacc4a2f36af15667850f735d5426dd..010f3724071031b565d946ae91e9ddbeb2e4c472 100644 (file)
@@ -44,7 +44,6 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
-#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dpio_phy.h"
@@ -707,13 +706,15 @@ void intel_read_infoframe(struct intel_encoder *encoder,
        /* see comment above for the reason for this offset */
        ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
        if (ret) {
-               DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+               drm_dbg_kms(encoder->base.dev,
+                           "Failed to unpack infoframe type 0x%02x\n", type);
                return;
        }
 
        if (frame->any.type != type)
-               DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
-                             frame->any.type, type);
+               drm_dbg_kms(encoder->base.dev,
+                           "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+                           frame->any.type, type);
 }
 
 static bool
@@ -853,7 +854,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
 
        ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
        if (ret < 0) {
-               DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "couldn't set HDR metadata in infoframe\n");
                return false;
        }
 
@@ -893,8 +895,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
                if (!(val & VIDEO_DIP_ENABLE))
                        return;
                if (port != (val & VIDEO_DIP_PORT_MASK)) {
-                       DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
-                                     (val & VIDEO_DIP_PORT_MASK) >> 29);
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "video DIP still enabled on port %c\n",
+                                   (val & VIDEO_DIP_PORT_MASK) >> 29);
                        return;
                }
                val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
@@ -906,8 +909,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
-                       DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
-                                     (val & VIDEO_DIP_PORT_MASK) >> 29);
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "video DIP already enabled on port %c\n",
+                                   (val & VIDEO_DIP_PORT_MASK) >> 29);
                        return;
                }
                val &= ~VIDEO_DIP_PORT_MASK;
@@ -1264,8 +1268,8 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
        if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
                return;
 
-       DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
-                     enable ? "Enabling" : "Disabling");
+       drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+                   enable ? "Enabling" : "Disabling");
 
        drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
                                         adapter, enable);
@@ -1346,13 +1350,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
                                    DRM_HDCP_AN_LEN);
        if (ret) {
-               DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+                           ret);
                return ret;
        }
 
        ret = intel_gmbus_output_aksv(adapter);
        if (ret < 0) {
-               DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
                return ret;
        }
        return 0;
@@ -1361,11 +1366,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
                                     u8 *bksv)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
        int ret;
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
                                   DRM_HDCP_KSV_LEN);
        if (ret)
-               DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+                           ret);
        return ret;
 }
 
@@ -1373,11 +1381,14 @@ static
 int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
                                 u8 *bstatus)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
        int ret;
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
                                   bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret)
-               DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+                           ret);
        return ret;
 }
 
@@ -1385,12 +1396,14 @@ static
 int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
                                     bool *repeater_present)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        int ret;
        u8 val;
 
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
-               DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+                           ret);
                return ret;
        }
        *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1401,11 +1414,14 @@ static
 int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
                                  u8 *ri_prime)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
        int ret;
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
                                   ri_prime, DRM_HDCP_RI_LEN);
        if (ret)
-               DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+                           ret);
        return ret;
 }
 
@@ -1413,12 +1429,14 @@ static
 int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
                                   bool *ksv_ready)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        int ret;
        u8 val;
 
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
-               DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+                           ret);
                return ret;
        }
        *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1429,11 +1447,13 @@ static
 int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
                                  int num_downstream, u8 *ksv_fifo)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        int ret;
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
                                   ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
        if (ret) {
-               DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
+               drm_dbg_kms(&i915->drm,
+                           "Read ksv fifo over DDC failed (%d)\n", ret);
                return ret;
        }
        return 0;
@@ -1443,6 +1463,7 @@ static
 int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
                                      int i, u32 *part)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        int ret;
 
        if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1451,7 +1472,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
        ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
                                   part, DRM_HDCP_V_PRIME_PART_LEN);
        if (ret)
-               DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
+               drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+                           i, ret);
        return ret;
 }
 
@@ -1474,12 +1496,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 
        ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
        if (ret) {
-               DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+               drm_err(&dev_priv->drm,
+                       "Disable HDCP signalling failed (%d)\n", ret);
                return ret;
        }
        ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
        if (ret) {
-               DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+               drm_err(&dev_priv->drm,
+                       "Enable HDCP signalling failed (%d)\n", ret);
                return ret;
        }
 
@@ -1500,8 +1524,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 
        ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
        if (ret) {
-               DRM_ERROR("%s HDCP signalling failed (%d)\n",
-                         enable ? "Enable" : "Disable", ret);
+               drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+                       enable ? "Enable" : "Disable", ret);
                return ret;
        }
 
@@ -1539,8 +1563,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
        if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
                      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
                     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
-               DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
-                         intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
+               drm_err(&i915->drm,
+                       "Ri' mismatch detected, link check failed (%x)\n",
+                       intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+                                                       port)));
                return false;
        }
        return true;
@@ -1588,17 +1614,19 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
        return -EINVAL;
 }
 
-static inline
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
-                                 u8 msg_id, bool *msg_ready,
-                                 ssize_t *msg_sz)
+static int
+hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+                             u8 msg_id, bool *msg_ready,
+                             ssize_t *msg_sz)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
        int ret;
 
-       ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+       ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
        if (ret < 0) {
-               DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+               drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+                           ret);
                return ret;
        }
 
@@ -1618,6 +1646,7 @@ static ssize_t
 intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
                              u8 msg_id, bool paired)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        bool msg_ready = false;
        int timeout, ret;
        ssize_t msg_sz = 0;
@@ -1632,8 +1661,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
                         !ret && msg_ready && msg_sz, timeout * 1000,
                         1000, 5 * 1000);
        if (ret)
-               DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
-                             msg_id, ret, timeout);
+               drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+                           msg_id, ret, timeout);
 
        return ret ? ret : msg_sz;
 }
@@ -1652,6 +1681,7 @@ static
 int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
                              u8 msg_id, void *buf, size_t size)
 {
+       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
        struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
        struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
        unsigned int offset;
@@ -1667,15 +1697,17 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
         * available buffer.
         */
        if (ret > size) {
-               DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
-                             ret, size);
+               drm_dbg_kms(&i915->drm,
+                           "msg_sz(%zd) is more than exp size(%zu)\n",
+                           ret, size);
                return -1;
        }
 
        offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
        ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
        if (ret)
-               DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+               drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+                           msg_id, ret);
 
        return ret;
 }
@@ -1718,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
        return ret;
 }
 
-static inline
-enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
-{
-       return HDCP_PROTOCOL_HDMI;
-}
-
 static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
        .write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
        .read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1871,15 +1897,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *pipe_config,
                                    const struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 
-       drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
-       DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-                        pipe_name(crtc->pipe));
+       drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
+       drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
+                   pipe_name(crtc->pipe));
        intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
-static void g4x_enable_hdmi(struct intel_encoder *encoder,
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
@@ -1901,7 +1929,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
                intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
-static void ibx_enable_hdmi(struct intel_encoder *encoder,
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
@@ -1952,7 +1981,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
                intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
-static void cpt_enable_hdmi(struct intel_encoder *encoder,
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
@@ -2005,13 +2035,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
                intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
-static void vlv_enable_hdmi(struct intel_encoder *encoder,
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
 }
 
-static void intel_disable_hdmi(struct intel_encoder *encoder,
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+                              struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state,
                               const struct drm_connector_state *old_conn_state)
 {
@@ -2069,7 +2101,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void g4x_disable_hdmi(struct intel_encoder *encoder,
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
 {
@@ -2077,10 +2110,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
                intel_audio_codec_disable(encoder,
                                          old_crtc_state, old_conn_state);
 
-       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+       intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_hdmi(struct intel_encoder *encoder,
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
 {
@@ -2089,11 +2123,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
                                          old_crtc_state, old_conn_state);
 }
 
-static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+       intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
 }
 
 static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
@@ -2286,29 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
        return true;
 }
 
-static bool
-intel_hdmi_ycbcr420_config(struct drm_connector *connector,
-                          struct intel_crtc_state *config)
+static int
+intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
-
-       if (!connector->ycbcr_420_allowed) {
-               DRM_ERROR("Platform doesn't support YCBCR420 output\n");
-               return false;
-       }
+       struct drm_connector *connector = conn_state->connector;
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
 
-       config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+       if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
+               return 0;
 
-       /* YCBCR 420 output conversion needs a scaler */
-       if (skl_update_scaler_crtc(config)) {
-               DRM_DEBUG_KMS("Scaler allocation for output failed\n");
-               return false;
+       if (!connector->ycbcr_420_allowed) {
+               drm_err(&i915->drm,
+                       "Platform doesn't support YCBCR420 output\n");
+               return -EINVAL;
        }
 
-       intel_pch_panel_fitting(intel_crtc, config,
-                               DRM_MODE_SCALE_FULLSCREEN);
+       crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
 
-       return true;
+       return intel_pch_panel_fitting(crtc_state, conn_state);
 }
 
 static int intel_hdmi_port_clock(int clock, int bpc)
@@ -2342,6 +2375,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
                                    struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
@@ -2366,13 +2400,15 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
        if (crtc_state->pipe_bpp > bpc * 3)
                crtc_state->pipe_bpp = bpc * 3;
 
-       DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
-                     bpc, crtc_state->pipe_bpp);
+       drm_dbg_kms(&i915->drm,
+                   "picking %d bpc for HDMI output (pipe bpp: %d)\n",
+                   bpc, crtc_state->pipe_bpp);
 
        if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
                                  false, crtc_state->has_hdmi_sink) != MODE_OK) {
-               DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
-                             crtc_state->port_clock);
+               drm_dbg_kms(&i915->drm,
+                           "unsupported HDMI clock (%d kHz), rejecting mode\n",
+                           crtc_state->port_clock);
                return -EINVAL;
        }
 
@@ -2433,12 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                pipe_config->pixel_multiplier = 2;
 
-       if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
-               if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
-                       DRM_ERROR("Can't support YCBCR420 output\n");
-                       return -EINVAL;
-               }
-       }
+       ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
+       if (ret)
+               return ret;
 
        pipe_config->limited_color_range =
                intel_hdmi_limited_color_range(pipe_config, conn_state);
@@ -2475,25 +2508,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
                }
        }
 
-       intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+       intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
+                                        conn_state);
 
        if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
-               DRM_DEBUG_KMS("bad AVI infoframe\n");
+               drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
                return -EINVAL;
        }
 
        if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
-               DRM_DEBUG_KMS("bad SPD infoframe\n");
+               drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
                return -EINVAL;
        }
 
        if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
-               DRM_DEBUG_KMS("bad HDMI infoframe\n");
+               drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
                return -EINVAL;
        }
 
        if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
-               DRM_DEBUG_KMS("bad DRM infoframe\n");
+               drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
                return -EINVAL;
        }
 
@@ -2543,7 +2577,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
                 */
                if (has_edid && !connector->override_edid &&
                    intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
-                       DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "Assuming DP dual mode adaptor presence based on VBT\n");
                        type = DRM_DP_DUAL_MODE_TYPE1_DVI;
                } else {
                        type = DRM_DP_DUAL_MODE_NONE;
@@ -2557,9 +2592,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
        hdmi->dp_dual_mode.max_tmds_clock =
                drm_dp_dual_mode_max_tmds_clock(type, adapter);
 
-       DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
-                     drm_dp_get_dual_mode_type_name(type),
-                     hdmi->dp_dual_mode.max_tmds_clock);
+       drm_dbg_kms(&dev_priv->drm,
+                   "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+                   drm_dp_get_dual_mode_type_name(type),
+                   hdmi->dp_dual_mode.max_tmds_clock);
 }
 
 static bool
@@ -2579,7 +2615,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
        edid = drm_get_edid(connector, i2c);
 
        if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
-               DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
                intel_gmbus_force_bit(i2c, true);
                edid = drm_get_edid(connector, i2c);
                intel_gmbus_force_bit(i2c, false);
@@ -2611,8 +2648,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
        intel_wakeref_t wakeref;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+                   connector->base.id, connector->name);
 
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
@@ -2643,8 +2680,10 @@ out:
 static void
 intel_hdmi_force(struct drm_connector *connector)
 {
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+                   connector->base.id, connector->name);
 
        intel_hdmi_unset_edid(connector);
 
@@ -2665,7 +2704,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
        return intel_connector_update_modes(connector, edid);
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
 {
@@ -2679,7 +2719,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
                                       pipe_config, conn_state);
 }
 
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
@@ -2696,12 +2737,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(encoder, pipe_config, conn_state);
+       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
 
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+                                   struct intel_encoder *encoder,
                                    const struct intel_crtc_state *pipe_config,
                                    const struct drm_connector_state *conn_state)
 {
@@ -2710,7 +2752,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
        vlv_phy_pre_pll_enable(encoder, pipe_config);
 }
 
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+                                   struct intel_encoder *encoder,
                                    const struct intel_crtc_state *pipe_config,
                                    const struct drm_connector_state *conn_state)
 {
@@ -2719,14 +2762,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
        chv_phy_pre_pll_enable(encoder, pipe_config);
 }
 
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
                                      const struct intel_crtc_state *old_crtc_state,
                                      const struct drm_connector_state *old_conn_state)
 {
        chv_phy_post_pll_disable(encoder, old_crtc_state);
 }
 
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
@@ -2734,7 +2779,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
        vlv_phy_reset_lanes(encoder, old_crtc_state);
 }
 
-static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
@@ -2749,7 +2795,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
        vlv_dpio_put(dev_priv);
 }
 
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
@@ -2767,7 +2814,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(encoder, pipe_config, conn_state);
+       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 
@@ -2786,6 +2833,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
 
 static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
        struct kobject *i2c_kobj = &adapter->dev.kobj;
        struct kobject *connector_kobj = &connector->kdev->kobj;
@@ -2793,7 +2841,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
 
        ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
        if (ret)
-               DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+               drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
 }
 
 static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
@@ -2814,8 +2862,6 @@ intel_hdmi_connector_register(struct drm_connector *connector)
        if (ret)
                return ret;
 
-       intel_connector_debugfs_add(connector);
-
        intel_hdmi_create_i2c_symlink(connector);
 
        return ret;
@@ -2922,9 +2968,10 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
        if (!sink_scrambling->supported)
                return true;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
-                     connector->base.id, connector->name,
-                     yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
+       drm_dbg_kms(&dev_priv->drm,
+                   "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+                   connector->base.id, connector->name,
+                   yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
 
        /* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
        return drm_scdc_set_high_tmds_clock_ratio(adapter,
@@ -3066,8 +3113,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
 
        ddc_pin = intel_bios_alternate_ddc_pin(encoder);
        if (ddc_pin) {
-               DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
-                             ddc_pin, port_name(port));
+               drm_dbg_kms(&dev_priv->drm,
+                           "Using DDC pin 0x%x for port %c (VBT)\n",
+                           ddc_pin, port_name(port));
                return ddc_pin;
        }
 
@@ -3084,8 +3132,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
        else
                ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
 
-       DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
-                     ddc_pin, port_name(port));
+       drm_dbg_kms(&dev_priv->drm,
+                   "Using DDC pin 0x%x for port %c (platform default)\n",
+                   ddc_pin, port_name(port));
 
        return ddc_pin;
 }
@@ -3142,8 +3191,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        enum port port = intel_encoder->port;
        struct cec_connector_info conn_info;
 
-       DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
-                     intel_encoder->base.base.id, intel_encoder->base.name);
+       drm_dbg_kms(&dev_priv->drm,
+                   "Adding HDMI connector on [ENCODER:%d:%s]\n",
+                   intel_encoder->base.base.id, intel_encoder->base.name);
 
        if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
                return;
@@ -3187,7 +3237,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                int ret = intel_hdcp_init(intel_connector,
                                          &intel_hdmi_hdcp_shim);
                if (ret)
-                       DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "HDCP init failed, skipping.\n");
        }
 
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -3206,16 +3257,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                cec_notifier_conn_register(dev->dev, port_identifier(port),
                                           &conn_info);
        if (!intel_hdmi->cec_notifier)
-               DRM_DEBUG_KMS("CEC notifier get failed\n");
+               drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
 }
 
 static enum intel_hotplug_state
 intel_hdmi_hotplug(struct intel_encoder *encoder,
-                  struct intel_connector *connector, bool irq_received)
+                  struct intel_connector *connector)
 {
        enum intel_hotplug_state state;
 
-       state = intel_encoder_hotplug(encoder, connector, irq_received);
+       state = intel_encoder_hotplug(encoder, connector);
 
        /*
         * On many platforms the HDMI live state signal is known to be
@@ -3229,7 +3280,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
         * time around we didn't detect any change in the sink's connection
         * status.
         */
-       if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+       if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
                state = INTEL_HOTPLUG_RETRY;
 
        return state;
index a091442efba4d46668d38499a42f47400bc121c3..4f6f560e093e9224f80f90e58901b7c1c140af75 100644 (file)
@@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 
 enum intel_hotplug_state
 intel_encoder_hotplug(struct intel_encoder *encoder,
-                     struct intel_connector *connector,
-                     bool irq_received)
+                     struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        enum drm_connector_status old_status;
@@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
                        struct intel_encoder *encoder =
                                intel_attached_encoder(connector);
 
+                       if (hpd_event_bits & hpd_bit)
+                               connector->hotplug_retries = 0;
+                       else
+                               connector->hotplug_retries++;
+
                        drm_dbg_kms(&dev_priv->drm,
-                                   "Connector %s (pin %i) received hotplug event.\n",
-                                   connector->base.name, pin);
+                                   "Connector %s (pin %i) received hotplug event. (retry %d)\n",
+                                   connector->base.name, pin,
+                                   connector->hotplug_retries);
 
-                       switch (encoder->hotplug(encoder, connector,
-                                                hpd_event_bits & hpd_bit)) {
+                       switch (encoder->hotplug(encoder, connector)) {
                        case INTEL_HOTPLUG_UNCHANGED:
                                break;
                        case INTEL_HOTPLUG_CHANGED:
index 1e6b4fda2900ae8cdcfc5b083b2da4b4e4f29950..777b0743257e5487c1a4159320d6eb248cea7548 100644 (file)
@@ -15,8 +15,7 @@ enum port;
 
 void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
 enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
-                                              struct intel_connector *connector,
-                                              bool irq_received);
+                                              struct intel_connector *connector);
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask);
 void intel_hpd_init(struct drm_i915_private *dev_priv);
index d807c5648c87e5b3995a61c6235c4ee28d73a154..6ff7b226f0a177887b07854b0bef42a4e9ae342c 100644 (file)
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config)
 {
        /* FIXME actually read this from the hw */
-       return enc_to_intel_lspcon(encoder)->active;
+       return 0;
 }
 
 void lspcon_resume(struct intel_lspcon *lspcon)
index 9a067effcfa09aec45100a4b31a69d0ea95f5582..872f2a48933953e325704be42db4f6433c17962f 100644 (file)
@@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
                       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+static void intel_pre_enable_lvds(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
 {
@@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
 /*
  * Sets the power state for the panel.
  */
-static void intel_enable_lvds(struct intel_encoder *encoder,
+static void intel_enable_lvds(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
 {
@@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
        intel_panel_enable_backlight(pipe_config, conn_state);
 }
 
-static void intel_disable_lvds(struct intel_encoder *encoder,
+static void intel_disable_lvds(struct intel_atomic_state *state,
+                              struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state,
                               const struct drm_connector_state *old_conn_state)
 {
@@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
        intel_de_posting_read(dev_priv, lvds_encoder->reg);
 }
 
-static void gmch_disable_lvds(struct intel_encoder *encoder,
+static void gmch_disable_lvds(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 
 {
        intel_panel_disable_backlight(old_conn_state);
 
-       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+       intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_lvds(struct intel_encoder *encoder,
+static void pch_disable_lvds(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
 {
        intel_panel_disable_backlight(old_conn_state);
 }
 
-static void pch_post_disable_lvds(struct intel_encoder *encoder,
+static void pch_post_disable_lvds(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+       intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
 }
 
 static enum drm_mode_status
@@ -397,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
        unsigned int lvds_bpp;
+       int ret;
 
        /* Should never happen!! */
        if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
@@ -430,16 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return -EINVAL;
 
-       if (HAS_PCH_SPLIT(dev_priv)) {
+       if (HAS_PCH_SPLIT(dev_priv))
                pipe_config->has_pch_encoder = true;
 
-               intel_pch_panel_fitting(intel_crtc, pipe_config,
-                                       conn_state->scaling_mode);
-       } else {
-               intel_gmch_panel_fitting(intel_crtc, pipe_config,
-                                        conn_state->scaling_mode);
-
-       }
+       if (HAS_GMCH(dev_priv))
+               ret = intel_gmch_panel_fitting(pipe_config, conn_state);
+       else
+               ret = intel_pch_panel_fitting(pipe_config, conn_state);
+       if (ret)
+               return ret;
 
        /*
         * XXX: It would be nice to support lower refresh rates on the
index 4811872231017d3ce5ade492f6e6e1ad57d85f2f..66711e62fa71a006544e98c51da0bc901a9ccdfd 100644 (file)
@@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
        enum pipe pipe = overlay->crtc->pipe;
        struct intel_frontbuffer *from = NULL, *to = NULL;
 
-       WARN_ON(overlay->old_vma);
+       drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
 
        if (overlay->vma)
                from = intel_frontbuffer_get(overlay->vma->obj);
@@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
        struct i915_vma *vma;
 
        vma = fetch_and_zero(&overlay->old_vma);
-       if (WARN_ON(!vma))
+       if (drm_WARN_ON(&overlay->i915->drm, !vma))
                return;
 
        intel_frontbuffer_flip_complete(overlay->i915,
@@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        struct i915_request *rq;
        u32 *cs, flip_addr = overlay->flip_addr;
 
-       WARN_ON(!overlay->active);
+       drm_WARN_ON(&overlay->i915->drm, !overlay->active);
 
        /* According to intel docs the overlay hw may hang (when switching
         * off) without loading the filter coeffs. It is however unclear whether
@@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
        if (!HAS_OVERLAY(dev_priv))
                return;
 
-       engine = dev_priv->engine[RCS0];
+       engine = dev_priv->gt.engine[RCS0];
        if (!engine || !engine->kernel_context)
                return;
 
index 276f438708026e1cf0846ffe9eb1e9adaa28a38d..3c5056dbf60793fda6b64132e1b86eef88901391 100644 (file)
@@ -176,24 +176,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
-void
-intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
-                       struct intel_crtc_state *pipe_config,
-                       int fitting_mode)
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+                           const struct drm_connector_state *conn_state)
 {
-       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-       int x = 0, y = 0, width = 0, height = 0;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+       int x, y, width, height;
 
        /* Native modes don't need fitting */
-       if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
-           adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
-           pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
-               goto done;
+       if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+           adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
+           crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+               return 0;
 
-       switch (fitting_mode) {
+       switch (conn_state->scaling_mode) {
        case DRM_MODE_SCALE_CENTER:
-               width = pipe_config->pipe_src_w;
-               height = pipe_config->pipe_src_h;
+               width = crtc_state->pipe_src_w;
+               height = crtc_state->pipe_src_h;
                x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
                y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
                break;
@@ -202,18 +201,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
                /* Scale but preserve the aspect ratio */
                {
                        u32 scaled_width = adjusted_mode->crtc_hdisplay
-                               * pipe_config->pipe_src_h;
-                       u32 scaled_height = pipe_config->pipe_src_w
+                               * crtc_state->pipe_src_h;
+                       u32 scaled_height = crtc_state->pipe_src_w
                                * adjusted_mode->crtc_vdisplay;
                        if (scaled_width > scaled_height) { /* pillar */
-                               width = scaled_height / pipe_config->pipe_src_h;
+                               width = scaled_height / crtc_state->pipe_src_h;
                                if (width & 1)
                                        width++;
                                x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
                                y = 0;
                                height = adjusted_mode->crtc_vdisplay;
                        } else if (scaled_width < scaled_height) { /* letter */
-                               height = scaled_width / pipe_config->pipe_src_w;
+                               height = scaled_width / crtc_state->pipe_src_w;
                                if (height & 1)
                                    height++;
                                y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -227,6 +226,10 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
                }
                break;
 
+       case DRM_MODE_SCALE_NONE:
+               WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
+               WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
+               /* fall through */
        case DRM_MODE_SCALE_FULLSCREEN:
                x = y = 0;
                width = adjusted_mode->crtc_hdisplay;
@@ -234,14 +237,15 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
                break;
 
        default:
-               WARN(1, "bad panel fit mode: %d\n", fitting_mode);
-               return;
+               MISSING_CASE(conn_state->scaling_mode);
+               return -EINVAL;
        }
 
-done:
-       pipe_config->pch_pfit.pos = (x << 16) | y;
-       pipe_config->pch_pfit.size = (width << 16) | height;
-       pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
+       drm_rect_init(&crtc_state->pch_pfit.dst,
+                     x, y, width, height);
+       crtc_state->pch_pfit.enabled = true;
+
+       return 0;
 }
 
 static void
@@ -287,7 +291,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode,
        adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
 }
 
-static inline u32 panel_fitter_scaling(u32 source, u32 target)
+static u32 panel_fitter_scaling(u32 source, u32 target)
 {
        /*
         * Floating point operation is not supported. So the FACTOR
@@ -300,13 +304,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
        return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
-static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
                              u32 *pfit_control)
 {
-       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
        u32 scaled_width = adjusted_mode->crtc_hdisplay *
-               pipe_config->pipe_src_h;
-       u32 scaled_height = pipe_config->pipe_src_w *
+               crtc_state->pipe_src_h;
+       u32 scaled_height = crtc_state->pipe_src_w *
                adjusted_mode->crtc_vdisplay;
 
        /* 965+ is easy, it does everything in hw */
@@ -316,18 +321,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
        else if (scaled_width < scaled_height)
                *pfit_control |= PFIT_ENABLE |
                        PFIT_SCALING_LETTER;
-       else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
+       else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
                *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 }
 
-static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
                              u32 *pfit_control, u32 *pfit_pgm_ratios,
                              u32 *border)
 {
-       struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+       struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        u32 scaled_width = adjusted_mode->crtc_hdisplay *
-               pipe_config->pipe_src_h;
-       u32 scaled_height = pipe_config->pipe_src_w *
+               crtc_state->pipe_src_h;
+       u32 scaled_height = crtc_state->pipe_src_w *
                adjusted_mode->crtc_vdisplay;
        u32 bits;
 
@@ -339,11 +344,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
        if (scaled_width > scaled_height) { /* pillar */
                centre_horizontally(adjusted_mode,
                                    scaled_height /
-                                   pipe_config->pipe_src_h);
+                                   crtc_state->pipe_src_h);
 
                *border = LVDS_BORDER_ENABLE;
-               if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
-                       bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+               if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
+                       bits = panel_fitter_scaling(crtc_state->pipe_src_h,
                                                    adjusted_mode->crtc_vdisplay);
 
                        *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -355,11 +360,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
        } else if (scaled_width < scaled_height) { /* letter */
                centre_vertically(adjusted_mode,
                                  scaled_width /
-                                 pipe_config->pipe_src_w);
+                                 crtc_state->pipe_src_w);
 
                *border = LVDS_BORDER_ENABLE;
-               if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
-                       bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+               if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+                       bits = panel_fitter_scaling(crtc_state->pipe_src_w,
                                                    adjusted_mode->crtc_hdisplay);
 
                        *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -377,35 +382,35 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
        }
 }
 
-void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
-                             struct intel_crtc_state *pipe_config,
-                             int fitting_mode)
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+                            const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-       struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+       struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 
        /* Native modes don't need fitting */
-       if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
-           adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+       if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+           adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
                goto out;
 
-       switch (fitting_mode) {
+       switch (conn_state->scaling_mode) {
        case DRM_MODE_SCALE_CENTER:
                /*
                 * For centered modes, we have to calculate border widths &
                 * heights and modify the values programmed into the CRTC.
                 */
-               centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
-               centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
+               centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
+               centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
                border = LVDS_BORDER_ENABLE;
                break;
        case DRM_MODE_SCALE_ASPECT:
                /* Scale but preserve the aspect ratio */
                if (INTEL_GEN(dev_priv) >= 4)
-                       i965_scale_aspect(pipe_config, &pfit_control);
+                       i965_scale_aspect(crtc_state, &pfit_control);
                else
-                       i9xx_scale_aspect(pipe_config, &pfit_control,
+                       i9xx_scale_aspect(crtc_state, &pfit_control,
                                          &pfit_pgm_ratios, &border);
                break;
        case DRM_MODE_SCALE_FULLSCREEN:
@@ -413,8 +418,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                 * Full scaling, even if it changes the aspect ratio.
                 * Fortunately this is all done for us in hw.
                 */
-               if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
-                   pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+               if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
+                   crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
                        pfit_control |= PFIT_ENABLE;
                        if (INTEL_GEN(dev_priv) >= 4)
                                pfit_control |= PFIT_SCALING_AUTO;
@@ -426,15 +431,14 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                }
                break;
        default:
-               drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
-                        fitting_mode);
-               return;
+               MISSING_CASE(conn_state->scaling_mode);
+               return -EINVAL;
        }
 
        /* 965+ wants fuzzy fitting */
        /* FIXME: handle multiple panels by failing gracefully */
        if (INTEL_GEN(dev_priv) >= 4)
-               pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
+               pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
 
 out:
        if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -443,12 +447,14 @@ out:
        }
 
        /* Make sure pre-965 set dither correctly for 18bpp panels. */
-       if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
+       if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
                pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
-       pipe_config->gmch_pfit.control = pfit_control;
-       pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
-       pipe_config->gmch_pfit.lvds_border_bits = border;
+       crtc_state->gmch_pfit.control = pfit_control;
+       crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+       crtc_state->gmch_pfit.lvds_border_bits = border;
+
+       return 0;
 }
 
 /**
@@ -483,20 +489,10 @@ static u32 scale(u32 source_val,
        return target_val;
 }
 
-/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
-static inline u32 scale_user_to_hw(struct intel_connector *connector,
-                                  u32 user_level, u32 user_max)
-{
-       struct intel_panel *panel = &connector->panel;
-
-       return scale(user_level, 0, user_max,
-                    panel->backlight.min, panel->backlight.max);
-}
-
 /* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
  * to [hw_min..hw_max]. */
-static inline u32 clamp_user_to_hw(struct intel_connector *connector,
-                                  u32 user_level, u32 user_max)
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+                           u32 user_level, u32 user_max)
 {
        struct intel_panel *panel = &connector->panel;
        u32 hw_level;
@@ -508,8 +504,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector,
 }
 
 /* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
-static inline u32 scale_hw_to_user(struct intel_connector *connector,
-                                  u32 hw_level, u32 user_max)
+static u32 scale_hw_to_user(struct intel_connector *connector,
+                           u32 hw_level, u32 user_max)
 {
        struct intel_panel *panel = &connector->panel;
 
@@ -684,9 +680,10 @@ static void
 intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_panel *panel = &connector->panel;
 
-       DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+       drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
 
        level = intel_panel_compute_brightness(connector, level);
        panel->backlight.set(conn_state, level);
@@ -867,8 +864,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
         * another client is not activated.
         */
        if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
-               drm_dbg(&dev_priv->drm,
-                       "Skipping backlight disable on vga switch\n");
+               drm_dbg_kms(&dev_priv->drm,
+                           "Skipping backlight disable on vga switch\n");
                return;
        }
 
@@ -1244,10 +1241,20 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
 
        mutex_unlock(&dev_priv->backlight_lock);
 
-       drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
+       drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
        return val;
 }
 
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+                           u32 user_level, u32 user_max)
+{
+       struct intel_panel *panel = &connector->panel;
+
+       return scale(user_level, 0, user_max,
+                    panel->backlight.min, panel->backlight.max);
+}
+
 /* set backlight brightness to level in range [0..max], scaling wrt hw min */
 static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
                                      u32 user_level, u32 user_max)
@@ -1335,6 +1342,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
 
 int intel_backlight_device_register(struct intel_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_panel *panel = &connector->panel;
        struct backlight_properties props;
 
@@ -1374,14 +1382,15 @@ int intel_backlight_device_register(struct intel_connector *connector)
                                          &intel_backlight_device_ops, &props);
 
        if (IS_ERR(panel->backlight.device)) {
-               DRM_ERROR("Failed to register backlight: %ld\n",
-                         PTR_ERR(panel->backlight.device));
+               drm_err(&i915->drm, "Failed to register backlight: %ld\n",
+                       PTR_ERR(panel->backlight.device));
                panel->backlight.device = NULL;
                return -ENODEV;
        }
 
-       DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
-                     connector->base.name);
+       drm_dbg_kms(&i915->drm,
+                   "Connector %s backlight sysfs interface registered\n",
+                   connector->base.name);
 
        return 0;
 }
@@ -1931,7 +1940,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
        return 0;
 }
 
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
index cedeea443336c8e3dbe50f3b0459a925c41578f5..968b95281cb415845a6100e42d3f37041882da9d 100644 (file)
@@ -25,19 +25,18 @@ int intel_panel_init(struct intel_panel *panel,
 void intel_panel_fini(struct intel_panel *panel);
 void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                            struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
-                            struct intel_crtc_state *pipe_config,
-                            int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
-                             struct intel_crtc_state *pipe_config,
-                             int fitting_mode);
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+                           const struct drm_connector_state *conn_state);
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+                            const struct drm_connector_state *conn_state);
 void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
                                    u32 level, u32 max);
 int intel_panel_setup_backlight(struct drm_connector *connector,
                                enum pipe pipe);
 void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
index fd9b146e3abac6146c0ae8867f7a0de68e44aac9..b7a2c102648a9e917cb790774fcefad606283391 100644 (file)
@@ -30,6 +30,7 @@
 #include "intel_display_types.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
+#include "intel_hdmi.h"
 
 /**
  * DOC: Panel Self Refresh (PSR/SRD)
@@ -137,41 +138,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
        intel_de_write(dev_priv, imr_reg, val);
 }
 
-static void psr_event_print(u32 val, bool psr2_enabled)
+static void psr_event_print(struct drm_i915_private *i915,
+                           u32 val, bool psr2_enabled)
 {
-       DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+       drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
        if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
-               DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+               drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
        if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
-               DRM_DEBUG_KMS("\tPSR2 disabled\n");
+               drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
        if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
-               DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+               drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
        if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
-               DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+               drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
        if (val & PSR_EVENT_GRAPHICS_RESET)
-               DRM_DEBUG_KMS("\tGraphics reset\n");
+               drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
        if (val & PSR_EVENT_PCH_INTERRUPT)
-               DRM_DEBUG_KMS("\tPCH interrupt\n");
+               drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
        if (val & PSR_EVENT_MEMORY_UP)
-               DRM_DEBUG_KMS("\tMemory up\n");
+               drm_dbg_kms(&i915->drm, "\tMemory up\n");
        if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
-               DRM_DEBUG_KMS("\tFront buffer modification\n");
+               drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
        if (val & PSR_EVENT_WD_TIMER_EXPIRE)
-               DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+               drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
        if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
-               DRM_DEBUG_KMS("\tPIPE registers updated\n");
+               drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
        if (val & PSR_EVENT_REGISTER_UPDATE)
-               DRM_DEBUG_KMS("\tRegister updated\n");
+               drm_dbg_kms(&i915->drm, "\tRegister updated\n");
        if (val & PSR_EVENT_HDCP_ENABLE)
-               DRM_DEBUG_KMS("\tHDCP enabled\n");
+               drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
        if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
-               DRM_DEBUG_KMS("\tKVMR session enabled\n");
+               drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
        if (val & PSR_EVENT_VBI_ENABLE)
-               DRM_DEBUG_KMS("\tVBI enabled\n");
+               drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
        if (val & PSR_EVENT_LPSP_MODE_EXIT)
-               DRM_DEBUG_KMS("\tLPSP mode exited\n");
+               drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
        if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
-               DRM_DEBUG_KMS("\tPSR disabled\n");
+               drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
 }
 
 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
@@ -209,7 +211,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 
                        intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
                                       val);
-                       psr_event_print(val, psr2_enabled);
+                       psr_event_print(dev_priv, val, psr2_enabled);
                }
        }
 
@@ -249,18 +251,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 
 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val = 8; /* assume the worst if we can't read the value */
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
                val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
        else
-               DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+               drm_dbg_kms(&i915->drm,
+                           "Unable to get sink synchronization latency, assuming 8 frames\n");
        return val;
 }
 
 static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u16 val;
        ssize_t r;
 
@@ -273,7 +278,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
 
        r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
        if (r != 2)
-               DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+               drm_dbg_kms(&i915->drm,
+                           "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
 
        /*
         * Spec says that if the value read is 0 the default granularity should
@@ -352,39 +358,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        }
 }
 
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct dp_sdp psr_vsc;
-
-       if (dev_priv->psr.psr2_enabled) {
-               /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
-               memset(&psr_vsc, 0, sizeof(psr_vsc));
-               psr_vsc.sdp_header.HB0 = 0;
-               psr_vsc.sdp_header.HB1 = 0x7;
-               if (dev_priv->psr.colorimetry_support) {
-                       psr_vsc.sdp_header.HB2 = 0x5;
-                       psr_vsc.sdp_header.HB3 = 0x13;
-               } else {
-                       psr_vsc.sdp_header.HB2 = 0x4;
-                       psr_vsc.sdp_header.HB3 = 0xe;
-               }
-       } else {
-               /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
-               memset(&psr_vsc, 0, sizeof(psr_vsc));
-               psr_vsc.sdp_header.HB0 = 0;
-               psr_vsc.sdp_header.HB1 = 0x7;
-               psr_vsc.sdp_header.HB2 = 0x2;
-               psr_vsc.sdp_header.HB3 = 0x8;
-       }
-
-       intel_dig_port->write_infoframe(&intel_dig_port->base,
-                                       crtc_state,
-                                       DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
-}
-
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -751,6 +724,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
        if (intel_dp != dev_priv->psr.dp)
                return;
 
+       if (!psr_global_enabled(dev_priv))
+               return;
        /*
         * HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms have a instance of PSR registers per transcoder but
@@ -793,6 +768,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 
        crtc_state->has_psr = true;
        crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+       crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -875,9 +851,12 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
-                                   const struct intel_crtc_state *crtc_state)
+                                   const struct intel_crtc_state *crtc_state,
+                                   const struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = dev_priv->psr.dp;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
        u32 val;
 
        drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
@@ -916,7 +895,9 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 
        drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
                    dev_priv->psr.psr2_enabled ? "2" : "1");
-       intel_psr_setup_vsc(intel_dp, crtc_state);
+       intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+                                    &dev_priv->psr.vsc);
+       intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
        intel_psr_enable_sink(intel_dp);
        intel_psr_enable_source(intel_dp, crtc_state);
        dev_priv->psr.enabled = true;
@@ -928,11 +909,13 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
  * intel_psr_enable - Enable PSR
  * @intel_dp: Intel DP
  * @crtc_state: new CRTC state
+ * @conn_state: new CONNECTOR state
  *
  * This function can only be called after the pipe is fully trained and enabled.
  */
 void intel_psr_enable(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state)
+                     const struct intel_crtc_state *crtc_state,
+                     const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
@@ -953,7 +936,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
                goto unlock;
        }
 
-       intel_psr_enable_locked(dev_priv, crtc_state);
+       intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
 
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
@@ -1086,13 +1069,15 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
  * intel_psr_update - Update PSR state
  * @intel_dp: Intel DP
  * @crtc_state: new CRTC state
+ * @conn_state: new CONNECTOR state
  *
  * This functions will update PSR states, disabling, enabling or switching PSR
  * version when executing fastsets. For full modeset, intel_psr_disable() and
  * intel_psr_enable() should be called instead.
  */
 void intel_psr_update(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state)
+                     const struct intel_crtc_state *crtc_state,
+                     const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct i915_psr *psr = &dev_priv->psr;
@@ -1129,7 +1114,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
                intel_psr_disable_locked(intel_dp);
 
        if (enable)
-               intel_psr_enable_locked(dev_priv, crtc_state);
+               intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
 
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
index 274fc6bb622122840de9eefe1e7c39d990253a6f..b4515186d5f46cc4e4e024e2f9a734e4601db603 100644 (file)
@@ -17,11 +17,13 @@ struct intel_dp;
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
 void intel_psr_init_dpcd(struct intel_dp *intel_dp);
 void intel_psr_enable(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state);
+                     const struct intel_crtc_state *crtc_state,
+                     const struct drm_connector_state *conn_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
                       const struct intel_crtc_state *old_crtc_state);
 void intel_psr_update(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state);
+                     const struct intel_crtc_state *crtc_state,
+                     const struct drm_connector_state *conn_state);
 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits,
index 637d8fe2f8c296460e6282e01640ebab3c4ba3ed..bc6c26818e152ba651e6f5ab91e7b9bac55cfed6 100644 (file)
@@ -1430,7 +1430,8 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
 #undef UPDATE_PROPERTY
 }
 
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
+                                 struct intel_encoder *intel_encoder,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
 {
@@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
                                   SDVO_AUDIO_PRESENCE_DETECT);
 }
 
-static void intel_disable_sdvo(struct intel_encoder *encoder,
+static void intel_disable_sdvo(struct intel_atomic_state *state,
+                              struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state,
                               const struct drm_connector_state *conn_state)
 {
@@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
        }
 }
 
-static void pch_disable_sdvo(struct intel_encoder *encoder,
+static void pch_disable_sdvo(struct intel_atomic_state *state,
+                            struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+static void pch_post_disable_sdvo(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
-       intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
+       intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_sdvo(struct intel_encoder *encoder,
+static void intel_enable_sdvo(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
 {
@@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 
 static enum intel_hotplug_state
 intel_sdvo_hotplug(struct intel_encoder *encoder,
-                  struct intel_connector *connector,
-                  bool irq_received)
+                  struct intel_connector *connector)
 {
        intel_sdvo_enable_hotplug(encoder);
 
-       return intel_encoder_hotplug(encoder, connector, irq_received);
+       return intel_encoder_hotplug(encoder, connector);
 }
 
 static bool
index 33d8861411382e2b38ebdb30e1c2d6fe0bd0ebce..0000ec7055f74f590e270b6e5ef54455957c4880 100644 (file)
@@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = {
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
+       DRM_FORMAT_XYUV8888,
 };
 
 static const u32 skl_planar_formats[] = {
@@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = {
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_NV12,
+       DRM_FORMAT_XYUV8888,
 };
 
 static const u32 glk_planar_formats[] = {
@@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = {
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_NV12,
+       DRM_FORMAT_XYUV8888,
        DRM_FORMAT_P010,
        DRM_FORMAT_P012,
        DRM_FORMAT_P016,
@@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = {
        DRM_FORMAT_Y210,
        DRM_FORMAT_Y212,
        DRM_FORMAT_Y216,
+       DRM_FORMAT_XYUV8888,
        DRM_FORMAT_XVYU2101010,
        DRM_FORMAT_XVYU12_16161616,
        DRM_FORMAT_XVYU16161616,
@@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = {
        DRM_FORMAT_Y210,
        DRM_FORMAT_Y212,
        DRM_FORMAT_Y216,
+       DRM_FORMAT_XYUV8888,
        DRM_FORMAT_XVYU2101010,
        DRM_FORMAT_XVYU12_16161616,
        DRM_FORMAT_XVYU16161616,
@@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = {
        DRM_FORMAT_Y210,
        DRM_FORMAT_Y212,
        DRM_FORMAT_Y216,
+       DRM_FORMAT_XYUV8888,
        DRM_FORMAT_XVYU2101010,
        DRM_FORMAT_XVYU12_16161616,
        DRM_FORMAT_XVYU16161616,
@@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
+       case DRM_FORMAT_XYUV8888:
        case DRM_FORMAT_P010:
        case DRM_FORMAT_P012:
        case DRM_FORMAT_P016:
@@ -2860,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
+       case DRM_FORMAT_XYUV8888:
        case DRM_FORMAT_P010:
        case DRM_FORMAT_P012:
        case DRM_FORMAT_P016:
index 9b850c11aa78b95ee9f68fb681f3d1f5d0e47e98..b161c15baf8694a7e4b2f9238240a53a71cbb6b0 100644 (file)
@@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
        if (INTEL_INFO(i915)->display.has_modular_fia) {
                modular_fia = intel_uncore_read(&i915->uncore,
                                                PORT_TX_DFLEXDPSP(FIA1));
+               drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
                modular_fia &= MODULAR_FIA_MASK;
        } else {
                modular_fia = 0;
@@ -52,6 +53,62 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
        }
 }
 
+static enum intel_display_power_domain
+tc_cold_get_power_domain(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+       if (INTEL_GEN(i915) == 11)
+               return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+       else
+               return POWER_DOMAIN_TC_COLD_OFF;
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       enum intel_display_power_domain domain;
+
+       if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+               return 0;
+
+       domain = tc_cold_get_power_domain(dig_port);
+       return intel_display_power_get(i915, domain);
+}
+
+static void
+tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       enum intel_display_power_domain domain;
+
+       /*
+        * wakeref == -1, means some error happened saving save_depot_stack but
+        * power should still be put down and 0 is a invalid save_depot_stack
+        * id so can be used to skip it for non TC legacy ports.
+        */
+       if (wakeref == 0)
+               return;
+
+       domain = tc_cold_get_power_domain(dig_port);
+       intel_display_power_put_async(i915, domain, wakeref);
+}
+
+static void
+assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       bool enabled;
+
+       if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+               return;
+
+       enabled = intel_display_power_is_enabled(i915,
+                                                tc_cold_get_power_domain(dig_port));
+       drm_WARN_ON(&i915->drm, !enabled);
+}
+
 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -62,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
                                      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
 
        drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+       assert_tc_cold_blocked(dig_port);
 
        lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
        return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -77,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
                                     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
 
        drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+       assert_tc_cold_blocked(dig_port);
 
        return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
               DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -91,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
        if (dig_port->tc_mode != TC_PORT_DP_ALT)
                return 4;
 
+       assert_tc_cold_blocked(dig_port);
+
        lane_mask = 0;
        with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -123,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
        drm_WARN_ON(&i915->drm,
                    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
 
+       assert_tc_cold_blocked(dig_port);
+
        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
        val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
@@ -152,6 +215,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
 static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
                                      u32 live_status_mask)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u32 valid_hpd_mask;
 
        if (dig_port->tc_legacy_port)
@@ -164,8 +228,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
                return;
 
        /* If live status mismatches the VBT flag, trust the live status. */
-       DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
-                 dig_port->tc_port_name, live_status_mask);
+       drm_err(&i915->drm,
+               "Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+               dig_port->tc_port_name, live_status_mask);
 
        dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
 }
@@ -173,8 +238,8 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
 static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
        struct intel_uncore *uncore = &i915->uncore;
+       u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
        u32 mask = 0;
        u32 val;
 
@@ -193,7 +258,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
        if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
                mask |= BIT(TC_PORT_DP_ALT);
 
-       if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+       if (intel_uncore_read(uncore, SDEISR) & isr_bit)
                mask |= BIT(TC_PORT_LEGACY);
 
        /* The sink can be connected only in a single mode. */
@@ -233,8 +298,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
-                           dig_port->tc_port_name,
-                             enableddisabled(enable));
+                           dig_port->tc_port_name, enableddisabled(enable));
 
                return false;
        }
@@ -286,11 +350,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
 static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
                               int required_lanes)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int max_lanes;
 
        if (!icl_tc_phy_status_complete(dig_port)) {
-               DRM_DEBUG_KMS("Port %s: PHY not ready\n",
-                             dig_port->tc_port_name);
+               drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+                           dig_port->tc_port_name);
                goto out_set_tbt_alt_mode;
        }
 
@@ -311,15 +376,16 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
         * became disconnected. Not necessary for legacy mode.
         */
        if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
-               DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
-                             dig_port->tc_port_name);
+               drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+                           dig_port->tc_port_name);
                goto out_set_safe_mode;
        }
 
        if (max_lanes < required_lanes) {
-               DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
-                             dig_port->tc_port_name,
-                             max_lanes, required_lanes);
+               drm_dbg_kms(&i915->drm,
+                           "Port %s: PHY max lanes %d < required lanes %d\n",
+                           dig_port->tc_port_name,
+                           max_lanes, required_lanes);
                goto out_set_safe_mode;
        }
 
@@ -357,15 +423,17 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
 
 static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
        if (!icl_tc_phy_status_complete(dig_port)) {
-               DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
-                             dig_port->tc_port_name);
+               drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+                           dig_port->tc_port_name);
                return dig_port->tc_mode == TC_PORT_TBT_ALT;
        }
 
        if (icl_tc_phy_is_in_safe_mode(dig_port)) {
-               DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
-                             dig_port->tc_port_name);
+               drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+                           dig_port->tc_port_name);
 
                return false;
        }
@@ -415,9 +483,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
        enum tc_port_mode old_tc_mode = dig_port->tc_mode;
 
        intel_display_power_flush_work(i915);
-       drm_WARN_ON(&i915->drm,
-                   intel_display_power_is_enabled(i915,
-                                       intel_aux_power_domain(dig_port)));
+       if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
+               enum intel_display_power_domain aux_domain;
+               bool aux_powered;
+
+               aux_domain = intel_aux_power_domain(dig_port);
+               aux_powered = intel_display_power_is_enabled(i915, aux_domain);
+               drm_WARN_ON(&i915->drm, aux_powered);
+       }
 
        icl_tc_phy_disconnect(dig_port);
        icl_tc_phy_connect(dig_port, required_lanes);
@@ -438,10 +511,13 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
 
 void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_encoder *encoder = &dig_port->base;
+       intel_wakeref_t tc_cold_wref;
        int active_links = 0;
 
        mutex_lock(&dig_port->tc_lock);
+       tc_cold_wref = tc_cold_block(dig_port);
 
        dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
        if (dig_port->dp.is_mst)
@@ -451,8 +527,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 
        if (active_links) {
                if (!icl_tc_phy_is_connected(dig_port))
-                       DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
-                                     dig_port->tc_port_name, active_links);
+                       drm_dbg_kms(&i915->drm,
+                                   "Port %s: PHY disconnected with %d active link(s)\n",
+                                   dig_port->tc_port_name, active_links);
                intel_tc_port_link_init_refcount(dig_port, active_links);
 
                goto out;
@@ -462,10 +539,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
                icl_tc_phy_connect(dig_port, 1);
 
 out:
-       DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
-                     dig_port->tc_port_name,
-                     tc_port_mode_name(dig_port->tc_mode));
+       drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+                   dig_port->tc_port_name,
+                   tc_port_mode_name(dig_port->tc_mode));
 
+       tc_cold_unblock(dig_port, tc_cold_wref);
        mutex_unlock(&dig_port->tc_lock);
 }
 
@@ -484,13 +562,19 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
  * connected ports are usable, and avoids exposing to the users objects they
  * can't really use.
  */
-bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+bool intel_tc_port_connected(struct intel_encoder *encoder)
 {
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        bool is_connected;
+       intel_wakeref_t tc_cold_wref;
 
        intel_tc_port_lock(dig_port);
+       tc_cold_wref = tc_cold_block(dig_port);
+
        is_connected = tc_port_live_status_mask(dig_port) &
                       BIT(dig_port->tc_mode);
+
+       tc_cold_unblock(dig_port, tc_cold_wref);
        intel_tc_port_unlock(dig_port);
 
        return is_connected;
@@ -506,9 +590,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
 
        mutex_lock(&dig_port->tc_lock);
 
-       if (!dig_port->tc_link_refcount &&
-           intel_tc_port_needs_reset(dig_port))
-               intel_tc_port_reset_mode(dig_port, required_lanes);
+       if (!dig_port->tc_link_refcount) {
+               intel_wakeref_t tc_cold_wref;
+
+               tc_cold_wref = tc_cold_block(dig_port);
+
+               if (intel_tc_port_needs_reset(dig_port))
+                       intel_tc_port_reset_mode(dig_port, required_lanes);
+
+               tc_cold_unblock(dig_port, tc_cold_wref);
+       }
 
        drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
        dig_port->tc_lock_wakeref = wakeref;
index 463f1b3c836f0aff18ec1d877d5040984e1a19de..b619e4736f8550645e396dff6b43cb3f95f6e582 100644 (file)
@@ -10,8 +10,9 @@
 #include <linux/types.h>
 
 struct intel_digital_port;
+struct intel_encoder;
 
-bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+bool intel_tc_port_connected(struct intel_encoder *encoder);
 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
 int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
index d2e3a3a323e9c05e1fd1080d2cff80d194246acc..fbe12aad7d58cc18be18ed058b396d283c6e54d0 100644 (file)
@@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder,
+intel_enable_tv(struct intel_atomic_state *state,
+               struct intel_encoder *encoder,
                const struct intel_crtc_state *pipe_config,
                const struct drm_connector_state *conn_state)
 {
@@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder,
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder,
+intel_disable_tv(struct intel_atomic_state *state,
+                struct intel_encoder *encoder,
                 const struct intel_crtc_state *old_crtc_state,
                 const struct drm_connector_state *old_conn_state)
 {
@@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
                       (color_conversion->bv << 16) | color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder,
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+                               struct intel_encoder *encoder,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
@@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector,
                struct drm_modeset_acquire_ctx *ctx,
                bool force)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
        enum drm_connector_status status;
        int type;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
-                     connector->base.id, connector->name,
-                     force);
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+                   connector->base.id, connector->name, force);
 
        if (force) {
                struct intel_load_detect_pipe tmp;
index 05c7cbe32eb406eb26fc59353f17ec672e4ba66f..aef7fe932d1a505f609b11b85b8b03a439bebedd 100644 (file)
@@ -462,7 +462,7 @@ struct bdb_general_definitions {
         * number = (block_size - sizeof(bdb_general_definitions))/
         *           defs->child_dev_size;
         */
-       u8 devices[0];
+       u8 devices[];
 } __packed;
 
 /*
@@ -839,7 +839,7 @@ struct bdb_mipi_config {
 
 struct bdb_mipi_sequence {
        u8 version;
-       u8 data[0]; /* up to 6 variable length blocks */
+       u8 data[]; /* up to 6 variable length blocks */
 } __packed;
 
 /*
index f4c362dc6e15f809a7553a9746a450d971bf6a7a..f582ab52f0b08e8e1247bbefc9a2f2dff9022832 100644 (file)
@@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;
-       struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        int ret;
@@ -279,11 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
                if (HAS_GMCH(dev_priv))
-                       intel_gmch_panel_fitting(crtc, pipe_config,
-                                                conn_state->scaling_mode);
+                       ret = intel_gmch_panel_fitting(pipe_config, conn_state);
                else
-                       intel_pch_panel_fitting(crtc, pipe_config,
-                                               conn_state->scaling_mode);
+                       ret = intel_pch_panel_fitting(pipe_config, conn_state);
+               if (ret)
+                       return ret;
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -759,7 +758,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder);
  * DSI port enable has to be done before pipe and plane enable, so we do it in
  * the pre_enable hook instead of the enable hook.
  */
-static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+                                struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
@@ -858,11 +858,12 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
 }
 
-static void bxt_dsi_enable(struct intel_encoder *encoder,
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
                           const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state)
 {
-       WARN_ON(crtc_state->has_pch_encoder);
+       drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
 
        intel_crtc_vblank_on(crtc_state);
 }
@@ -871,14 +872,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder,
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
  */
-static void intel_dsi_disable(struct intel_encoder *encoder,
+static void intel_dsi_disable(struct intel_atomic_state *state,
+                             struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        enum port port;
 
-       DRM_DEBUG_KMS("\n");
+       drm_dbg_kms(&i915->drm, "\n");
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
        intel_panel_disable_backlight(old_conn_state);
@@ -906,7 +909,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                vlv_dsi_clear_device_ready(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
                                   const struct intel_crtc_state *old_crtc_state,
                                   const struct drm_connector_state *old_conn_state)
 {
index 34be4c0ee7c59a3d9b64a8219d224f28ed6c52ca..bc02237169064d615c38d3bbe43f0804d75b59bb 100644 (file)
@@ -108,7 +108,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
        if (clflush) {
                i915_sw_fence_await_reservation(&clflush->base.chain,
                                                obj->base.resv, NULL, true,
-                                               I915_FENCE_TIMEOUT,
+                                               i915_fence_timeout(to_i915(obj->base.dev)),
                                                I915_FENCE_GFP);
                dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
                dma_fence_work_commit(&clflush->base);
index 0598e5382a1def8df146d15af324ab4e385d052d..d3a86a4d5c0417d683511151414c1920ac99ba51 100644 (file)
@@ -6,7 +6,6 @@
 #include "i915_drv.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "i915_gem_client_blt.h"
 #include "i915_gem_object_blt.h"
 
@@ -289,8 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_lock(obj);
        err = i915_sw_fence_await_reservation(&work->wait,
-                                             obj->base.resv, NULL,
-                                             true, I915_FENCE_TIMEOUT,
+                                             obj->base.resv, NULL, true, 0,
                                              I915_FENCE_GFP);
        if (err < 0) {
                dma_fence_set_error(&work->dma, err);
index 68326ad3b2e09e7f3dcc1107d1d425c4fe86c653..900ea8b7fc8fb5b0cc22cc4f26c2885719ca71e9 100644 (file)
@@ -130,9 +130,7 @@ static void lut_close(struct i915_gem_context *ctx)
                if (&lut->obj_link != &obj->lut_list) {
                        i915_lut_handle_free(lut);
                        radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
-                       if (atomic_dec_and_test(&vma->open_count) &&
-                           !i915_vma_is_ggtt(vma))
-                               i915_vma_close(vma);
+                       i915_vma_close(vma);
                        i915_gem_object_put(obj);
                }
 
@@ -570,23 +568,19 @@ static void engines_idle_release(struct i915_gem_context *ctx,
        engines->ctx = i915_gem_context_get(ctx);
 
        for_each_gem_engine(ce, engines, it) {
-               struct dma_fence *fence;
-               int err = 0;
+               int err;
 
                /* serialises with execbuf */
                set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
                if (!intel_context_pin_if_active(ce))
                        continue;
 
-               fence = i915_active_fence_get(&ce->timeline->last_request);
-               if (fence) {
-                       err = i915_sw_fence_await_dma_fence(&engines->fence,
-                                                           fence, 0,
-                                                           GFP_KERNEL);
-                       dma_fence_put(fence);
-               }
+               /* Wait until context is finally scheduled out and retired */
+               err = i915_sw_fence_await_active(&engines->fence,
+                                                &ce->active,
+                                                I915_ACTIVE_AWAIT_BARRIER);
                intel_context_unpin(ce);
-               if (err < 0)
+               if (err)
                        goto kill;
        }
 
@@ -757,21 +751,46 @@ err_free:
        return ERR_PTR(err);
 }
 
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+       struct i915_gem_engines *engines;
+
+       rcu_read_lock();
+       do {
+               engines = rcu_dereference(ctx->engines);
+               GEM_BUG_ON(!engines);
+
+               if (unlikely(!i915_sw_fence_await(&engines->fence)))
+                       continue;
+
+               if (likely(engines == rcu_access_pointer(ctx->engines)))
+                       break;
+
+               i915_sw_fence_complete(&engines->fence);
+       } while (1);
+       rcu_read_unlock();
+
+       return engines;
+}
+
 static int
 context_apply_all(struct i915_gem_context *ctx,
                  int (*fn)(struct intel_context *ce, void *data),
                  void *data)
 {
        struct i915_gem_engines_iter it;
+       struct i915_gem_engines *e;
        struct intel_context *ce;
        int err = 0;
 
-       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+       e = __context_engines_await(ctx);
+       for_each_gem_engine(ce, e, it) {
                err = fn(ce, data);
                if (err)
                        break;
        }
-       i915_gem_context_unlock_engines(ctx);
+       i915_sw_fence_complete(&e->fence);
 
        return err;
 }
@@ -786,11 +805,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-       struct i915_address_space *old = i915_gem_context_vm(ctx);
+       struct i915_address_space *old;
 
+       old = rcu_replace_pointer(ctx->vm,
+                                 i915_vm_open(vm),
+                                 lockdep_is_held(&ctx->mutex));
        GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
-       rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
        context_apply_all(ctx, __apply_ppgtt, vm);
 
        return old;
@@ -1069,30 +1090,6 @@ static void cb_retire(struct i915_active *base)
        kfree(cb);
 }
 
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
-       struct i915_gem_engines *engines;
-
-       rcu_read_lock();
-       do {
-               engines = rcu_dereference(ctx->engines);
-               if (unlikely(!engines))
-                       break;
-
-               if (unlikely(!i915_sw_fence_await(&engines->fence)))
-                       continue;
-
-               if (likely(engines == rcu_access_pointer(ctx->engines)))
-                       break;
-
-               i915_sw_fence_complete(&engines->fence);
-       } while (1);
-       rcu_read_unlock();
-
-       return engines;
-}
-
 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
                                intel_engine_mask_t engines,
@@ -1401,10 +1398,10 @@ static int get_ringsize(struct i915_gem_context *ctx,
        return 0;
 }
 
-static int
-user_to_context_sseu(struct drm_i915_private *i915,
-                    const struct drm_i915_gem_context_param_sseu *user,
-                    struct intel_sseu *context)
+int
+i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+                             const struct drm_i915_gem_context_param_sseu *user,
+                             struct intel_sseu *context)
 {
        const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
 
@@ -1539,7 +1536,7 @@ static int set_sseu(struct i915_gem_context *ctx,
                goto out_ce;
        }
 
-       ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+       ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
        if (ret)
                goto out_ce;
 
index f1d884d304bd6fa8cf53d31d60c084d70d4ffabe..3702b2fb27abd73103b078f800aeb4e8794fd2ae 100644 (file)
@@ -225,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
+int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+                                 const struct drm_i915_gem_context_param_sseu *user,
+                                 struct intel_sseu *context);
+
 #endif /* !__I915_GEM_CONTEXT_H__ */
index 4f96c8788a2ec1b396b7b11a35dc377e787260ec..7f76fc68f498aa872a208166023ddfb4f98cf347 100644 (file)
@@ -368,7 +368,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_vma *vma;
 
-       if (!atomic_read(&obj->bind_count))
+       if (list_empty(&obj->vma.list))
                return;
 
        mutex_lock(&i915->ggtt.vm.mutex);
index 8a4e9c1cbf6c1262d9e68e85760b899e502d46bf..3ce185670ca47da9304999ce658fad991d237941 100644 (file)
@@ -15,8 +15,8 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
-#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_ring.h"
 
@@ -40,6 +40,11 @@ struct eb_vma {
        u32 handle;
 };
 
+struct eb_vma_array {
+       struct kref kref;
+       struct eb_vma vma[];
+};
+
 enum {
        FORCE_CPU_RELOC = 1,
        FORCE_GTT_RELOC,
@@ -52,7 +57,6 @@ enum {
 #define __EXEC_OBJECT_NEEDS_MAP                BIT(29)
 #define __EXEC_OBJECT_NEEDS_BIAS       BIT(28)
 #define __EXEC_OBJECT_INTERNAL_FLAGS   (~0u << 28) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC       BIT(31)
 #define __EXEC_INTERNAL_FLAGS  (~0u << 31)
@@ -264,7 +268,9 @@ struct i915_execbuffer {
                bool has_fence : 1;
                bool needs_unfenced : 1;
 
+               struct i915_vma *target;
                struct i915_request *rq;
+               struct i915_vma *rq_vma;
                u32 *rq_cmd;
                unsigned int rq_size;
        } reloc_cache;
@@ -283,6 +289,7 @@ struct i915_execbuffer {
         */
        int lut_size;
        struct hlist_head *buckets; /** ht for relocation handles */
+       struct eb_vma_array *array;
 };
 
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
@@ -292,8 +299,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
                 eb->args->batch_len);
 }
 
+static struct eb_vma_array *eb_vma_array_create(unsigned int count)
+{
+       struct eb_vma_array *arr;
+
+       arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
+       if (!arr)
+               return NULL;
+
+       kref_init(&arr->kref);
+       arr->vma[0].vma = NULL;
+
+       return arr;
+}
+
+static inline void eb_unreserve_vma(struct eb_vma *ev)
+{
+       struct i915_vma *vma = ev->vma;
+
+       if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+               __i915_vma_unpin_fence(vma);
+
+       if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+               __i915_vma_unpin(vma);
+
+       ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
+                      __EXEC_OBJECT_HAS_FENCE);
+}
+
+static void eb_vma_array_destroy(struct kref *kref)
+{
+       struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
+       struct eb_vma *ev = arr->vma;
+
+       while (ev->vma) {
+               eb_unreserve_vma(ev);
+               i915_vma_put(ev->vma);
+               ev++;
+       }
+
+       kvfree(arr);
+}
+
+static void eb_vma_array_put(struct eb_vma_array *arr)
+{
+       kref_put(&arr->kref, eb_vma_array_destroy);
+}
+
 static int eb_create(struct i915_execbuffer *eb)
 {
+       /* Allocate an extra slot for use by the command parser + sentinel */
+       eb->array = eb_vma_array_create(eb->buffer_count + 2);
+       if (!eb->array)
+               return -ENOMEM;
+
+       eb->vma = eb->array->vma;
+
        if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
                unsigned int size = 1 + ilog2(eb->buffer_count);
 
@@ -327,8 +388,10 @@ static int eb_create(struct i915_execbuffer *eb)
                                break;
                } while (--size);
 
-               if (unlikely(!size))
+               if (unlikely(!size)) {
+                       eb_vma_array_put(eb->array);
                        return -ENOMEM;
+               }
 
                eb->lut_size = size;
        } else {
@@ -368,6 +431,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
        return false;
 }
 
+static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
+                       unsigned int exec_flags)
+{
+       u64 pin_flags = 0;
+
+       if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+               pin_flags |= PIN_GLOBAL;
+
+       /*
+        * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+        * limit address to the first 4GBs for unflagged objects.
+        */
+       if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+               pin_flags |= PIN_ZONE_4G;
+
+       if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+               pin_flags |= PIN_MAPPABLE;
+
+       if (exec_flags & EXEC_OBJECT_PINNED)
+               pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+       else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
+               pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+       return pin_flags;
+}
+
 static inline bool
 eb_pin_vma(struct i915_execbuffer *eb,
           const struct drm_i915_gem_exec_object2 *entry,
@@ -385,8 +474,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
        if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
                pin_flags |= PIN_GLOBAL;
 
-       if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
-               return false;
+       /* Attempt to reuse the current location if available */
+       if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+               if (entry->flags & EXEC_OBJECT_PINNED)
+                       return false;
+
+               /* Failing that pick any _free_ space if suitable */
+               if (unlikely(i915_vma_pin(vma,
+                                         entry->pad_to_size,
+                                         entry->alignment,
+                                         eb_pin_flags(entry, ev->flags) |
+                                         PIN_USER | PIN_NOEVICT)))
+                       return false;
+       }
 
        if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
                if (unlikely(i915_vma_pin_fence(vma))) {
@@ -402,26 +502,6 @@ eb_pin_vma(struct i915_execbuffer *eb,
        return !eb_vma_misplaced(entry, vma, ev->flags);
 }
 
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
-       GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
-       if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
-               __i915_vma_unpin_fence(vma);
-
-       __i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct eb_vma *ev)
-{
-       if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
-               return;
-
-       __eb_unreserve_vma(ev->vma, ev->flags);
-       ev->flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
 static int
 eb_validate_vma(struct i915_execbuffer *eb,
                struct drm_i915_gem_exec_object2 *entry,
@@ -481,7 +561,7 @@ eb_add_vma(struct i915_execbuffer *eb,
 
        GEM_BUG_ON(i915_vma_is_closed(vma));
 
-       ev->vma = i915_vma_get(vma);
+       ev->vma = vma;
        ev->exec = entry;
        ev->flags = entry->flags;
 
@@ -547,28 +627,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
                          u64 pin_flags)
 {
        struct drm_i915_gem_exec_object2 *entry = ev->exec;
-       unsigned int exec_flags = ev->flags;
        struct i915_vma *vma = ev->vma;
        int err;
 
-       if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
-               pin_flags |= PIN_GLOBAL;
-
-       /*
-        * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
-        * limit address to the first 4GBs for unflagged objects.
-        */
-       if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
-               pin_flags |= PIN_ZONE_4G;
-
-       if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
-               pin_flags |= PIN_MAPPABLE;
-
-       if (exec_flags & EXEC_OBJECT_PINNED)
-               pin_flags |= entry->offset | PIN_OFFSET_FIXED;
-       else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
-               pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-
        if (drm_mm_node_allocated(&vma->node) &&
            eb_vma_misplaced(entry, vma, ev->flags)) {
                err = i915_vma_unbind(vma);
@@ -578,7 +639,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 
        err = i915_vma_pin(vma,
                           entry->pad_to_size, entry->alignment,
-                          pin_flags);
+                          eb_pin_flags(entry, ev->flags) | pin_flags);
        if (err)
                return err;
 
@@ -587,7 +648,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
                eb->args->flags |= __EXEC_HAS_RELOC;
        }
 
-       if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+       if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
                err = i915_vma_pin_fence(vma);
                if (unlikely(err)) {
                        i915_vma_unpin(vma);
@@ -595,10 +656,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
                }
 
                if (vma->fence)
-                       exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+                       ev->flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
-       ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+       ev->flags |= __EXEC_OBJECT_HAS_PIN;
        GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
 
        return 0;
@@ -728,77 +789,117 @@ static int eb_select_context(struct i915_execbuffer *eb)
        return 0;
 }
 
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
+static int __eb_add_lut(struct i915_execbuffer *eb,
+                       u32 handle, struct i915_vma *vma)
 {
-       struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
-       struct drm_i915_gem_object *obj;
-       unsigned int i, batch;
+       struct i915_gem_context *ctx = eb->gem_context;
+       struct i915_lut_handle *lut;
        int err;
 
-       if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
-               return -ENOENT;
+       lut = i915_lut_handle_alloc();
+       if (unlikely(!lut))
+               return -ENOMEM;
 
-       INIT_LIST_HEAD(&eb->relocs);
-       INIT_LIST_HEAD(&eb->unbound);
+       i915_vma_get(vma);
+       if (!atomic_fetch_inc(&vma->open_count))
+               i915_vma_reopen(vma);
+       lut->handle = handle;
+       lut->ctx = ctx;
+
+       /* Check that the context hasn't been closed in the meantime */
+       err = -EINTR;
+       if (!mutex_lock_interruptible(&ctx->mutex)) {
+               err = -ENOENT;
+               if (likely(!i915_gem_context_is_closed(ctx)))
+                       err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+               if (err == 0) { /* And nor has this handle */
+                       struct drm_i915_gem_object *obj = vma->obj;
+
+                       i915_gem_object_lock(obj);
+                       if (idr_find(&eb->file->object_idr, handle) == obj) {
+                               list_add(&lut->obj_link, &obj->lut_list);
+                       } else {
+                               radix_tree_delete(&ctx->handles_vma, handle);
+                               err = -ENOENT;
+                       }
+                       i915_gem_object_unlock(obj);
+               }
+               mutex_unlock(&ctx->mutex);
+       }
+       if (unlikely(err))
+               goto err;
 
-       batch = eb_batch_index(eb);
+       return 0;
 
-       for (i = 0; i < eb->buffer_count; i++) {
-               u32 handle = eb->exec[i].handle;
-               struct i915_lut_handle *lut;
+err:
+       i915_vma_close(vma);
+       i915_vma_put(vma);
+       i915_lut_handle_free(lut);
+       return err;
+}
+
+static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
+{
+       do {
+               struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
+               int err;
 
-               vma = radix_tree_lookup(handles_vma, handle);
+               rcu_read_lock();
+               vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
                if (likely(vma))
-                       goto add_vma;
+                       vma = i915_vma_tryget(vma);
+               rcu_read_unlock();
+               if (likely(vma))
+                       return vma;
 
                obj = i915_gem_object_lookup(eb->file, handle);
-               if (unlikely(!obj)) {
-                       err = -ENOENT;
-                       goto err_vma;
-               }
+               if (unlikely(!obj))
+                       return ERR_PTR(-ENOENT);
 
                vma = i915_vma_instance(obj, eb->context->vm, NULL);
                if (IS_ERR(vma)) {
-                       err = PTR_ERR(vma);
-                       goto err_obj;
+                       i915_gem_object_put(obj);
+                       return vma;
                }
 
-               lut = i915_lut_handle_alloc();
-               if (unlikely(!lut)) {
-                       err = -ENOMEM;
-                       goto err_obj;
-               }
+               err = __eb_add_lut(eb, handle, vma);
+               if (likely(!err))
+                       return vma;
 
-               err = radix_tree_insert(handles_vma, handle, vma);
-               if (unlikely(err)) {
-                       i915_lut_handle_free(lut);
-                       goto err_obj;
-               }
+               i915_gem_object_put(obj);
+               if (err != -EEXIST)
+                       return ERR_PTR(err);
+       } while (1);
+}
 
-               /* transfer ref to lut */
-               if (!atomic_fetch_inc(&vma->open_count))
-                       i915_vma_reopen(vma);
-               lut->handle = handle;
-               lut->ctx = eb->gem_context;
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+       unsigned int batch = eb_batch_index(eb);
+       unsigned int i;
+       int err = 0;
 
-               i915_gem_object_lock(obj);
-               list_add(&lut->obj_link, &obj->lut_list);
-               i915_gem_object_unlock(obj);
+       INIT_LIST_HEAD(&eb->relocs);
+       INIT_LIST_HEAD(&eb->unbound);
+
+       for (i = 0; i < eb->buffer_count; i++) {
+               struct i915_vma *vma;
+
+               vma = eb_lookup_vma(eb, eb->exec[i].handle);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       break;
+               }
 
-add_vma:
                err = eb_validate_vma(eb, &eb->exec[i], vma);
-               if (unlikely(err))
-                       goto err_vma;
+               if (unlikely(err)) {
+                       i915_vma_put(vma);
+                       break;
+               }
 
                eb_add_vma(eb, i, batch, vma);
        }
 
-       return 0;
-
-err_obj:
-       i915_gem_object_put(obj);
-err_vma:
        eb->vma[i].vma = NULL;
        return err;
 }
@@ -823,31 +924,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
        }
 }
 
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
-       const unsigned int count = eb->buffer_count;
-       unsigned int i;
-
-       for (i = 0; i < count; i++) {
-               struct eb_vma *ev = &eb->vma[i];
-               struct i915_vma *vma = ev->vma;
-
-               if (!vma)
-                       break;
-
-               eb->vma[i].vma = NULL;
-
-               if (ev->flags & __EXEC_OBJECT_HAS_PIN)
-                       __eb_unreserve_vma(vma, ev->flags);
-
-               i915_vma_put(vma);
-       }
-}
-
 static void eb_destroy(const struct i915_execbuffer *eb)
 {
        GEM_BUG_ON(eb->reloc_cache.rq);
 
+       if (eb->array)
+               eb_vma_array_put(eb->array);
+
        if (eb->lut_size > 0)
                kfree(eb->buckets);
 }
@@ -872,7 +955,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
        cache->node.flags = 0;
        cache->rq = NULL;
-       cache->rq_size = 0;
+       cache->target = NULL;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -894,29 +977,122 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
        return &i915->ggtt;
 }
 
-static void reloc_gpu_flush(struct reloc_cache *cache)
+#define RELOC_TAIL 4
+
+static int reloc_gpu_chain(struct reloc_cache *cache)
+{
+       struct intel_gt_buffer_pool_node *pool;
+       struct i915_request *rq = cache->rq;
+       struct i915_vma *batch;
+       u32 *cmd;
+       int err;
+
+       pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
+
+       batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto out_pool;
+       }
+
+       err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+       if (err)
+               goto out_pool;
+
+       GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE  / sizeof(u32));
+       cmd = cache->rq_cmd + cache->rq_size;
+       *cmd++ = MI_ARB_CHECK;
+       if (cache->gen >= 8)
+               *cmd++ = MI_BATCH_BUFFER_START_GEN8;
+       else if (cache->gen >= 6)
+               *cmd++ = MI_BATCH_BUFFER_START;
+       else
+               *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+       *cmd++ = lower_32_bits(batch->node.start);
+       *cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
+       i915_gem_object_flush_map(cache->rq_vma->obj);
+       i915_gem_object_unpin_map(cache->rq_vma->obj);
+       cache->rq_vma = NULL;
+
+       err = intel_gt_buffer_pool_mark_active(pool, rq);
+       if (err == 0) {
+               i915_vma_lock(batch);
+               err = i915_request_await_object(rq, batch->obj, false);
+               if (err == 0)
+                       err = i915_vma_move_to_active(batch, rq, 0);
+               i915_vma_unlock(batch);
+       }
+       i915_vma_unpin(batch);
+       if (err)
+               goto out_pool;
+
+       cmd = i915_gem_object_pin_map(batch->obj,
+                                     cache->has_llc ?
+                                     I915_MAP_FORCE_WB :
+                                     I915_MAP_FORCE_WC);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto out_pool;
+       }
+
+       /* Return with batch mapping (cmd) still pinned */
+       cache->rq_cmd = cmd;
+       cache->rq_size = 0;
+       cache->rq_vma = batch;
+
+out_pool:
+       intel_gt_buffer_pool_put(pool);
+       return err;
+}
+
+static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
 {
-       struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+       return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
+}
+
+static int reloc_gpu_flush(struct reloc_cache *cache)
+{
+       struct i915_request *rq;
+       int err;
 
-       GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
-       cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+       rq = fetch_and_zero(&cache->rq);
+       if (!rq)
+               return 0;
 
-       __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
-       i915_gem_object_unpin_map(obj);
+       if (cache->rq_vma) {
+               struct drm_i915_gem_object *obj = cache->rq_vma->obj;
 
-       intel_gt_chipset_flush(cache->rq->engine->gt);
+               GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
+               cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;
 
-       i915_request_add(cache->rq);
-       cache->rq = NULL;
+               __i915_gem_object_flush_map(obj,
+                                           0, sizeof(u32) * cache->rq_size);
+               i915_gem_object_unpin_map(obj);
+       }
+
+       err = 0;
+       if (rq->engine->emit_init_breadcrumb)
+               err = rq->engine->emit_init_breadcrumb(rq);
+       if (!err)
+               err = rq->engine->emit_bb_start(rq,
+                                               rq->batch->node.start,
+                                               PAGE_SIZE,
+                                               reloc_bb_flags(cache));
+       if (err)
+               i915_request_set_error_once(rq, err);
+
+       intel_gt_chipset_flush(rq->engine->gt);
+       i915_request_add(rq);
+
+       return err;
 }
 
 static void reloc_cache_reset(struct reloc_cache *cache)
 {
        void *vaddr;
 
-       if (cache->rq)
-               reloc_gpu_flush(cache);
-
        if (!cache->vaddr)
                return;
 
@@ -1109,17 +1285,17 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
 }
 
 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
-                            struct i915_vma *vma,
+                            struct intel_engine_cs *engine,
                             unsigned int len)
 {
        struct reloc_cache *cache = &eb->reloc_cache;
-       struct intel_engine_pool_node *pool;
+       struct intel_gt_buffer_pool_node *pool;
        struct i915_request *rq;
        struct i915_vma *batch;
        u32 *cmd;
        int err;
 
-       pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+       pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
        if (IS_ERR(pool))
                return PTR_ERR(pool);
 
@@ -1132,7 +1308,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto out_pool;
        }
 
-       batch = i915_vma_instance(pool->obj, vma->vm, NULL);
+       batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto err_unmap;
@@ -1142,26 +1318,32 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        if (err)
                goto err_unmap;
 
-       rq = i915_request_create(eb->context);
+       if (engine == eb->context->engine) {
+               rq = i915_request_create(eb->context);
+       } else {
+               struct intel_context *ce;
+
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       goto err_unpin;
+               }
+
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(eb->context->vm);
+
+               rq = intel_context_create_request(ce);
+               intel_context_put(ce);
+       }
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }
 
-       err = intel_engine_pool_mark_active(pool, rq);
+       err = intel_gt_buffer_pool_mark_active(pool, rq);
        if (err)
                goto err_request;
 
-       err = reloc_move_to_gpu(rq, vma);
-       if (err)
-               goto err_request;
-
-       err = eb->engine->emit_bb_start(rq,
-                                       batch->node.start, PAGE_SIZE,
-                                       cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
-       if (err)
-               goto skip_request;
-
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
@@ -1176,6 +1358,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        cache->rq = rq;
        cache->rq_cmd = cmd;
        cache->rq_size = 0;
+       cache->rq_vma = batch;
 
        /* Return with batch mapping (cmd) still pinned */
        goto out_pool;
@@ -1189,124 +1372,206 @@ err_unpin:
 err_unmap:
        i915_gem_object_unpin_map(pool->obj);
 out_pool:
-       intel_engine_pool_put(pool);
+       intel_gt_buffer_pool_put(pool);
        return err;
 }
 
+static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
+{
+       return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
+}
+
 static u32 *reloc_gpu(struct i915_execbuffer *eb,
                      struct i915_vma *vma,
                      unsigned int len)
 {
        struct reloc_cache *cache = &eb->reloc_cache;
        u32 *cmd;
-
-       if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
-               reloc_gpu_flush(cache);
+       int err;
 
        if (unlikely(!cache->rq)) {
-               int err;
+               struct intel_engine_cs *engine = eb->engine;
 
-               if (!intel_engine_can_store_dword(eb->engine))
-                       return ERR_PTR(-ENODEV);
+               if (!reloc_can_use_engine(engine)) {
+                       engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
+                       if (!engine)
+                               return ERR_PTR(-ENODEV);
+               }
 
-               err = __reloc_gpu_alloc(eb, vma, len);
+               err = __reloc_gpu_alloc(eb, engine, len);
                if (unlikely(err))
                        return ERR_PTR(err);
        }
 
+       if (vma != cache->target) {
+               err = reloc_move_to_gpu(cache->rq, vma);
+               if (unlikely(err)) {
+                       i915_request_set_error_once(cache->rq, err);
+                       return ERR_PTR(err);
+               }
+
+               cache->target = vma;
+       }
+
+       if (unlikely(cache->rq_size + len >
+                    PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
+               err = reloc_gpu_chain(cache);
+               if (unlikely(err)) {
+                       i915_request_set_error_once(cache->rq, err);
+                       return ERR_PTR(err);
+               }
+       }
+
+       GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE  / sizeof(u32));
        cmd = cache->rq_cmd + cache->rq_size;
        cache->rq_size += len;
 
        return cmd;
 }
 
-static u64
-relocate_entry(struct i915_vma *vma,
-              const struct drm_i915_gem_relocation_entry *reloc,
-              struct i915_execbuffer *eb,
-              const struct i915_vma *target)
+static inline bool use_reloc_gpu(struct i915_vma *vma)
 {
-       u64 offset = reloc->offset;
-       u64 target_offset = relocation_target(reloc, target);
-       bool wide = eb->reloc_cache.use_64bit_reloc;
-       void *vaddr;
+       if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+               return true;
 
-       if (!eb->reloc_cache.vaddr &&
-           (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-            !dma_resv_test_signaled_rcu(vma->resv, true))) {
-               const unsigned int gen = eb->reloc_cache.gen;
-               unsigned int len;
-               u32 *batch;
-               u64 addr;
-
-               if (wide)
-                       len = offset & 7 ? 8 : 5;
-               else if (gen >= 4)
-                       len = 4;
-               else
-                       len = 3;
+       if (DBG_FORCE_RELOC)
+               return false;
 
-               batch = reloc_gpu(eb, vma, len);
-               if (IS_ERR(batch))
-                       goto repeat;
+       return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
 
-               addr = gen8_canonical_addr(vma->node.start + offset);
-               if (wide) {
-                       if (offset & 7) {
-                               *batch++ = MI_STORE_DWORD_IMM_GEN4;
-                               *batch++ = lower_32_bits(addr);
-                               *batch++ = upper_32_bits(addr);
-                               *batch++ = lower_32_bits(target_offset);
-
-                               addr = gen8_canonical_addr(addr + 4);
-
-                               *batch++ = MI_STORE_DWORD_IMM_GEN4;
-                               *batch++ = lower_32_bits(addr);
-                               *batch++ = upper_32_bits(addr);
-                               *batch++ = upper_32_bits(target_offset);
-                       } else {
-                               *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
-                               *batch++ = lower_32_bits(addr);
-                               *batch++ = upper_32_bits(addr);
-                               *batch++ = lower_32_bits(target_offset);
-                               *batch++ = upper_32_bits(target_offset);
-                       }
-               } else if (gen >= 6) {
+static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
+{
+       struct page *page;
+       unsigned long addr;
+
+       GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
+
+       page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
+       addr = PFN_PHYS(page_to_pfn(page));
+       GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
+
+       return addr + offset_in_page(offset);
+}
+
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+                             struct i915_vma *vma,
+                             u64 offset,
+                             u64 target_addr)
+{
+       const unsigned int gen = eb->reloc_cache.gen;
+       unsigned int len;
+       u32 *batch;
+       u64 addr;
+
+       if (gen >= 8)
+               len = offset & 7 ? 8 : 5;
+       else if (gen >= 4)
+               len = 4;
+       else
+               len = 3;
+
+       batch = reloc_gpu(eb, vma, len);
+       if (IS_ERR(batch))
+               return false;
+
+       addr = gen8_canonical_addr(vma->node.start + offset);
+       if (gen >= 8) {
+               if (offset & 7) {
                        *batch++ = MI_STORE_DWORD_IMM_GEN4;
-                       *batch++ = 0;
-                       *batch++ = addr;
-                       *batch++ = target_offset;
-               } else if (gen >= 4) {
-                       *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-                       *batch++ = 0;
-                       *batch++ = addr;
-                       *batch++ = target_offset;
+                       *batch++ = lower_32_bits(addr);
+                       *batch++ = upper_32_bits(addr);
+                       *batch++ = lower_32_bits(target_addr);
+
+                       addr = gen8_canonical_addr(addr + 4);
+
+                       *batch++ = MI_STORE_DWORD_IMM_GEN4;
+                       *batch++ = lower_32_bits(addr);
+                       *batch++ = upper_32_bits(addr);
+                       *batch++ = upper_32_bits(target_addr);
                } else {
-                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-                       *batch++ = addr;
-                       *batch++ = target_offset;
+                       *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
+                       *batch++ = lower_32_bits(addr);
+                       *batch++ = upper_32_bits(addr);
+                       *batch++ = lower_32_bits(target_addr);
+                       *batch++ = upper_32_bits(target_addr);
                }
-
-               goto out;
+       } else if (gen >= 6) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4;
+               *batch++ = 0;
+               *batch++ = addr;
+               *batch++ = target_addr;
+       } else if (IS_I965G(eb->i915)) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4;
+               *batch++ = 0;
+               *batch++ = vma_phys_addr(vma, offset);
+               *batch++ = target_addr;
+       } else if (gen >= 4) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+               *batch++ = 0;
+               *batch++ = addr;
+               *batch++ = target_addr;
+       } else if (gen >= 3 &&
+                  !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
+               *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+               *batch++ = addr;
+               *batch++ = target_addr;
+       } else {
+               *batch++ = MI_STORE_DWORD_IMM;
+               *batch++ = vma_phys_addr(vma, offset);
+               *batch++ = target_addr;
        }
 
+       return true;
+}
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+                           struct i915_vma *vma,
+                           u64 offset,
+                           u64 target_addr)
+{
+       if (eb->reloc_cache.vaddr)
+               return false;
+
+       if (!use_reloc_gpu(vma))
+               return false;
+
+       return __reloc_entry_gpu(eb, vma, offset, target_addr);
+}
+
+static u64
+relocate_entry(struct i915_vma *vma,
+              const struct drm_i915_gem_relocation_entry *reloc,
+              struct i915_execbuffer *eb,
+              const struct i915_vma *target)
+{
+       u64 target_addr = relocation_target(reloc, target);
+       u64 offset = reloc->offset;
+
+       if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+               bool wide = eb->reloc_cache.use_64bit_reloc;
+               void *vaddr;
+
 repeat:
-       vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
-       if (IS_ERR(vaddr))
-               return PTR_ERR(vaddr);
+               vaddr = reloc_vaddr(vma->obj,
+                                   &eb->reloc_cache,
+                                   offset >> PAGE_SHIFT);
+               if (IS_ERR(vaddr))
+                       return PTR_ERR(vaddr);
 
-       clflush_write32(vaddr + offset_in_page(offset),
-                       lower_32_bits(target_offset),
-                       eb->reloc_cache.vaddr);
+               GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+               clflush_write32(vaddr + offset_in_page(offset),
+                               lower_32_bits(target_addr),
+                               eb->reloc_cache.vaddr);
 
-       if (wide) {
-               offset += sizeof(u32);
-               target_offset >>= 32;
-               wide = false;
-               goto repeat;
+               if (wide) {
+                       offset += sizeof(u32);
+                       target_addr >>= 32;
+                       wide = false;
+                       goto repeat;
+               }
        }
 
-out:
        return target->node.start | UPDATE;
 }
 
@@ -1411,12 +1676,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
-       struct drm_i915_gem_relocation_entry __user *urelocs;
        const struct drm_i915_gem_exec_object2 *entry = ev->exec;
-       unsigned int remain;
+       struct drm_i915_gem_relocation_entry __user *urelocs =
+               u64_to_user_ptr(entry->relocs_ptr);
+       unsigned long remain = entry->relocation_count;
 
-       urelocs = u64_to_user_ptr(entry->relocs_ptr);
-       remain = entry->relocation_count;
        if (unlikely(remain > N_RELOC(ULONG_MAX)))
                return -EINVAL;
 
@@ -1425,13 +1689,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
         * to read. However, if the array is not writable the user loses
         * the updated relocation values.
         */
-       if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
+       if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
                return -EFAULT;
 
        do {
                struct drm_i915_gem_relocation_entry *r = stack;
                unsigned int count =
-                       min_t(unsigned int, remain, ARRAY_SIZE(stack));
+                       min_t(unsigned long, remain, ARRAY_SIZE(stack));
                unsigned int copied;
 
                /*
@@ -1494,9 +1758,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
 {
        int err;
 
-       mutex_lock(&eb->gem_context->mutex);
        err = eb_lookup_vmas(eb);
-       mutex_unlock(&eb->gem_context->mutex);
        if (err)
                return err;
 
@@ -1509,15 +1771,20 @@ static int eb_relocate(struct i915_execbuffer *eb)
        /* The objects are in their final locations, apply the relocations. */
        if (eb->args->flags & __EXEC_HAS_RELOC) {
                struct eb_vma *ev;
+               int flush;
 
                list_for_each_entry(ev, &eb->relocs, reloc_link) {
                        err = eb_relocate_vma(eb, ev);
                        if (err)
-                               return err;
+                               break;
                }
+
+               flush = reloc_gpu_flush(&eb->reloc_cache);
+               if (!err)
+                       err = flush;
        }
 
-       return 0;
+       return err;
 }
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1597,19 +1864,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                        err = i915_vma_move_to_active(vma, eb->request, flags);
 
                i915_vma_unlock(vma);
-
-               __eb_unreserve_vma(vma, flags);
-               i915_vma_put(vma);
-
-               ev->vma = NULL;
+               eb_unreserve_vma(ev);
        }
        ww_acquire_fini(&acquire);
 
+       eb_vma_array_put(fetch_and_zero(&eb->array));
+
        if (unlikely(err))
                goto err_skip;
 
-       eb->exec = NULL;
-
        /* Unconditionally flush any chipset caches (for streaming writes). */
        intel_gt_chipset_flush(eb->engine->gt);
        return 0;
@@ -1784,7 +2047,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
        dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
        dma_resv_unlock(shadow->resv);
 
-       dma_fence_work_commit(&pw->base);
+       dma_fence_work_commit_imm(&pw->base);
        return 0;
 
 err_batch_unlock:
@@ -1804,7 +2067,7 @@ err_free:
 static int eb_parse(struct i915_execbuffer *eb)
 {
        struct drm_i915_private *i915 = eb->i915;
-       struct intel_engine_pool_node *pool;
+       struct intel_gt_buffer_pool_node *pool;
        struct i915_vma *shadow, *trampoline;
        unsigned int len;
        int err;
@@ -1827,7 +2090,7 @@ static int eb_parse(struct i915_execbuffer *eb)
                len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
        }
 
-       pool = intel_engine_get_pool(eb->engine, len);
+       pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
        if (IS_ERR(pool))
                return PTR_ERR(pool);
 
@@ -1861,6 +2124,7 @@ static int eb_parse(struct i915_execbuffer *eb)
        eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
        eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
        eb->batch = &eb->vma[eb->buffer_count++];
+       eb->vma[eb->buffer_count].vma = NULL;
 
        eb->trampoline = trampoline;
        eb->batch_start_offset = 0;
@@ -1874,7 +2138,7 @@ err_trampoline:
 err_shadow:
        i915_vma_unpin(shadow);
 err:
-       intel_engine_pool_put(pool);
+       intel_gt_buffer_pool_put(pool);
        return err;
 }
 
@@ -2318,39 +2582,13 @@ static void eb_request_add(struct i915_execbuffer *eb)
        /* Check that the context wasn't destroyed before submission */
        if (likely(!intel_context_is_closed(eb->context))) {
                attr = eb->gem_context->sched;
-
-               /*
-                * Boost actual workloads past semaphores!
-                *
-                * With semaphores we spin on one engine waiting for another,
-                * simply to reduce the latency of starting our work when
-                * the signaler completes. However, if there is any other
-                * work that we could be doing on this engine instead, that
-                * is better utilisation and will reduce the overall duration
-                * of the current work. To avoid PI boosting a semaphore
-                * far in the distance past over useful work, we keep a history
-                * of any semaphore use along our dependency chain.
-                */
-               if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
-                       attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
-               /*
-                * Boost priorities to new clients (new request flows).
-                *
-                * Allow interactive/synchronous clients to jump ahead of
-                * the bulk clients. (FQ_CODEL)
-                */
-               if (list_empty(&rq->sched.signalers_list))
-                       attr.priority |= I915_PRIORITY_WAIT;
        } else {
                /* Serialise with context_close via the add_to_timeline */
                i915_request_set_error_once(rq, -ENOENT);
                __i915_request_skip(rq);
        }
 
-       local_bh_disable();
        __i915_request_queue(rq, &attr);
-       local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 
        /* Try to clean up the client's timeline after submitting the request */
        if (prev)
@@ -2369,7 +2607,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        struct drm_i915_private *i915 = to_i915(dev);
        struct i915_execbuffer eb;
        struct dma_fence *in_fence = NULL;
-       struct dma_fence *exec_fence = NULL;
        struct sync_file *out_fence = NULL;
        struct i915_vma *batch;
        int out_fence_fd = -1;
@@ -2386,8 +2623,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                args->flags |= __EXEC_HAS_RELOC;
 
        eb.exec = exec;
-       eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
-       eb.vma[0].vma = NULL;
 
        eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
        reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2414,30 +2649,22 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        if (args->flags & I915_EXEC_IS_PINNED)
                eb.batch_flags |= I915_DISPATCH_PINNED;
 
-       if (args->flags & I915_EXEC_FENCE_IN) {
+#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
+       if (args->flags & IN_FENCES) {
+               if ((args->flags & IN_FENCES) == IN_FENCES)
+                       return -EINVAL;
+
                in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
                if (!in_fence)
                        return -EINVAL;
        }
-
-       if (args->flags & I915_EXEC_FENCE_SUBMIT) {
-               if (in_fence) {
-                       err = -EINVAL;
-                       goto err_in_fence;
-               }
-
-               exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
-               if (!exec_fence) {
-                       err = -EINVAL;
-                       goto err_in_fence;
-               }
-       }
+#undef IN_FENCES
 
        if (args->flags & I915_EXEC_FENCE_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
                        err = out_fence_fd;
-                       goto err_exec_fence;
+                       goto err_in_fence;
                }
        }
 
@@ -2528,14 +2755,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        }
 
        if (in_fence) {
-               err = i915_request_await_dma_fence(eb.request, in_fence);
-               if (err < 0)
-                       goto err_request;
-       }
-
-       if (exec_fence) {
-               err = i915_request_await_execution(eb.request, exec_fence,
-                                                  eb.engine->bond_execute);
+               if (args->flags & I915_EXEC_FENCE_SUBMIT)
+                       err = i915_request_await_execution(eb.request,
+                                                          in_fence,
+                                                          eb.engine->bond_execute);
+               else
+                       err = i915_request_await_dma_fence(eb.request,
+                                                          in_fence);
                if (err < 0)
                        goto err_request;
        }
@@ -2563,7 +2789,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         */
        eb.request->batch = batch;
        if (batch->private)
-               intel_engine_pool_mark_active(batch->private, eb.request);
+               intel_gt_buffer_pool_mark_active(batch->private, eb.request);
 
        trace_i915_request_queue(eb.request, eb.batch_flags);
        err = eb_submit(&eb, batch);
@@ -2592,10 +2818,8 @@ err_batch_unpin:
                i915_vma_unpin(batch);
 err_parse:
        if (batch->private)
-               intel_engine_pool_put(batch->private);
+               intel_gt_buffer_pool_put(batch->private);
 err_vma:
-       if (eb.exec)
-               eb_release_vmas(&eb);
        if (eb.trampoline)
                i915_vma_unpin(eb.trampoline);
        eb_unpin_engine(&eb);
@@ -2606,8 +2830,6 @@ err_destroy:
 err_out_fence:
        if (out_fence_fd != -1)
                put_unused_fd(out_fence_fd);
-err_exec_fence:
-       dma_fence_put(exec_fence);
 err_in_fence:
        dma_fence_put(in_fence);
        return err;
@@ -2615,7 +2837,7 @@ err_in_fence:
 
 static size_t eb_element_size(void)
 {
-       return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
+       return sizeof(struct drm_i915_gem_exec_object2);
 }
 
 static bool check_buffer_count(size_t count)
@@ -2671,7 +2893,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
        /* Copy in the exec list from userland */
        exec_list = kvmalloc_array(count, sizeof(*exec_list),
                                   __GFP_NOWARN | GFP_KERNEL);
-       exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+       exec2_list = kvmalloc_array(count, eb_element_size(),
                                    __GFP_NOWARN | GFP_KERNEL);
        if (exec_list == NULL || exec2_list == NULL) {
                drm_dbg(&i915->drm,
@@ -2749,8 +2971,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
        if (err)
                return err;
 
-       /* Allocate an extra slot for use by the command parser */
-       exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+       exec2_list = kvmalloc_array(count, eb_element_size(),
                                    __GFP_NOWARN | GFP_KERNEL);
        if (exec2_list == NULL) {
                drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
@@ -2818,3 +3039,7 @@ end:;
        kvfree(exec2_list);
        return err;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_execbuffer.c"
+#endif
index 2f6100ec260820c6f17b0f2af5e896e8923c91ff..8ab842c80f995a62bccf903d93403dd5ae2e957f 100644 (file)
@@ -72,8 +72,8 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
                       0, 0);
 
        if (i915_sw_fence_await_reservation(&stub->chain,
-                                           obj->base.resv, NULL,
-                                           true, I915_FENCE_TIMEOUT,
+                                           obj->base.resv, NULL, true,
+                                           i915_fence_timeout(to_i915(obj->base.dev)),
                                            I915_FENCE_GFP) < 0)
                goto err;
 
index b39c24dae64e6a02645c6a6bb9364fc86d9348c2..70f5f82da288b87577195b09986be0d77c2923b8 100644 (file)
@@ -396,6 +396,38 @@ err:
        return i915_error_to_vmf_fault(ret);
 }
 
+static int
+vm_access(struct vm_area_struct *area, unsigned long addr,
+         void *buf, int len, int write)
+{
+       struct i915_mmap_offset *mmo = area->vm_private_data;
+       struct drm_i915_gem_object *obj = mmo->obj;
+       void *vaddr;
+
+       if (i915_gem_object_is_readonly(obj) && write)
+               return -EACCES;
+
+       addr -= area->vm_start;
+       if (addr >= obj->base.size)
+               return -EINVAL;
+
+       /* As this is primarily for debugging, let's focus on simplicity */
+       vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       if (write) {
+               memcpy(vaddr + addr, buf, len);
+               __i915_gem_object_flush_map(obj, addr, len);
+       } else {
+               memcpy(buf, vaddr + addr, len);
+       }
+
+       i915_gem_object_unpin_map(obj);
+
+       return len;
+}
+
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
@@ -745,12 +777,14 @@ static void vm_close(struct vm_area_struct *vma)
 
 static const struct vm_operations_struct vm_ops_gtt = {
        .fault = vm_fault_gtt,
+       .access = vm_access,
        .open = vm_open,
        .close = vm_close,
 };
 
 static const struct vm_operations_struct vm_ops_cpu = {
        .fault = vm_fault_cpu,
+       .access = vm_access,
        .open = vm_open,
        .close = vm_close,
 };
index 5da9f9e534b94981c65e81ef6ba1f4eb5f634852..99356c00c19ee7afcc200671471cfc7a1ea7f855 100644 (file)
@@ -135,9 +135,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
                if (vma) {
                        GEM_BUG_ON(vma->obj != obj);
                        GEM_BUG_ON(!atomic_read(&vma->open_count));
-                       if (atomic_dec_and_test(&vma->open_count) &&
-                           !i915_vma_is_ggtt(vma))
-                               i915_vma_close(vma);
+                       i915_vma_close(vma);
                }
                mutex_unlock(&ctx->mutex);
 
@@ -164,9 +162,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
        struct drm_i915_gem_object *obj, *on;
-       intel_wakeref_t wakeref;
 
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_mmap_offset *mmo, *mn;
 
@@ -206,7 +202,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                }
                obj->mmo.offsets = RB_ROOT;
 
-               GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
@@ -227,7 +222,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
                cond_resched();
        }
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
index e00792158f130bfe87adab8786ffb17e07576505..f457d713049125f5f704a5b97824462827ce939c 100644 (file)
@@ -6,8 +6,8 @@
 #include "i915_drv.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_ring.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_object_blt.h"
@@ -18,7 +18,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 {
        struct drm_i915_private *i915 = ce->vm->i915;
        const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-       struct intel_engine_pool_node *pool;
+       struct intel_gt_buffer_pool_node *pool;
        struct i915_vma *batch;
        u64 offset;
        u64 count;
@@ -33,7 +33,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
        count = div_u64(round_up(vma->size, block_size), block_size);
        size = (1 + 8 * count) * sizeof(u32);
        size = round_up(size, PAGE_SIZE);
-       pool = intel_engine_get_pool(ce->engine, size);
+       pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
        if (IS_ERR(pool)) {
                err = PTR_ERR(pool);
                goto out_pm;
@@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
        } while (rem);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(ce->vm->gt);
 
+       i915_gem_object_flush_map(pool->obj);
        i915_gem_object_unpin_map(pool->obj);
 
+       intel_gt_chipset_flush(ce->vm->gt);
+
        batch = i915_vma_instance(pool->obj, ce->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
@@ -96,7 +98,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
        return batch;
 
 out_put:
-       intel_engine_pool_put(pool);
+       intel_gt_buffer_pool_put(pool);
 out_pm:
        intel_engine_pm_put(ce->engine);
        return ERR_PTR(err);
@@ -114,13 +116,13 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
        if (unlikely(err))
                return err;
 
-       return intel_engine_pool_mark_active(vma->private, rq);
+       return intel_gt_buffer_pool_mark_active(vma->private, rq);
 }
 
 void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
 {
        i915_vma_unpin(vma);
-       intel_engine_pool_put(vma->private);
+       intel_gt_buffer_pool_put(vma->private);
        intel_engine_pm_put(ce->engine);
 }
 
@@ -213,7 +215,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 {
        struct drm_i915_private *i915 = ce->vm->i915;
        const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-       struct intel_engine_pool_node *pool;
+       struct intel_gt_buffer_pool_node *pool;
        struct i915_vma *batch;
        u64 src_offset, dst_offset;
        u64 count, rem;
@@ -228,7 +230,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
        count = div_u64(round_up(dst->size, block_size), block_size);
        size = (1 + 11 * count) * sizeof(u32);
        size = round_up(size, PAGE_SIZE);
-       pool = intel_engine_get_pool(ce->engine, size);
+       pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
        if (IS_ERR(pool)) {
                err = PTR_ERR(pool);
                goto out_pm;
@@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
        } while (rem);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(ce->vm->gt);
 
+       i915_gem_object_flush_map(pool->obj);
        i915_gem_object_unpin_map(pool->obj);
 
+       intel_gt_chipset_flush(ce->vm->gt);
+
        batch = i915_vma_instance(pool->obj, ce->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
@@ -307,7 +311,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
        return batch;
 
 out_put:
-       intel_engine_pool_put(pool);
+       intel_gt_buffer_pool_put(pool);
 out_pm:
        intel_engine_pm_put(ce->engine);
        return ERR_PTR(err);
index 243a43a878249d1c8c3404461d152bfe214f8686..8bcd336a90dc6e77d323b2537112bebf14a8ac87 100644 (file)
@@ -10,7 +10,6 @@
 
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "i915_vma.h"
 
 struct drm_i915_gem_object;
index a0b10bcd8d8acbd94e81f004a07428636d54bf7a..54ee658bb168b60865ebec9ae0e90976bfeb4c8f 100644 (file)
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
 #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
 #define STRIDE_MASK (~TILING_MASK)
 
-       /** Count of VMA actually bound by this object */
-       atomic_t bind_count;
-
        struct {
                /*
                 * Protects the pages and their use. Do not use directly, but
index 24f4cadea1149fd3aa921cf8b30b743f3853c92d..af9e48ee4a334c10981397299e99a50b232fcccf 100644 (file)
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;
 
-       GEM_BUG_ON(atomic_read(&obj->bind_count));
-
        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock(&obj->mm.lock);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -393,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));
 
+       wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;
 
        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
index 698e22420dc5e946c17e956ab8fb57f3b6b8722d..7fe9831aa9bab9b7591b6c6855553ee67848fcda 100644 (file)
@@ -10,8 +10,6 @@
 
 #include <drm/drm.h> /* for drm_legacy.h! */
 #include <drm/drm_cache.h>
-#include <drm/drm_legacy.h> /* for drm_pci.h! */
-#include <drm/drm_pci.h>
 
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
index 03e5eb4c99d11464988e2594801afb06d92454ad..5b65ce738b160d39252b73ea6e4eef56892a77d9 100644 (file)
@@ -26,18 +26,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
        if (!i915_gem_object_is_shrinkable(obj))
                return false;
 
-       /*
-        * Only report true if by unbinding the object and putting its pages
-        * we can actually make forward progress towards freeing physical
-        * pages.
-        *
-        * If the pages are pinned for any other reason than being bound
-        * to the GPU, simply unbinding from the GPU is not going to succeed
-        * in releasing our pin count on the pages themselves.
-        */
-       if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
-               return false;
-
        /*
         * We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
        flags = 0;
        if (shrink & I915_SHRINK_ACTIVE)
                flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+       if (!(shrink & I915_SHRINK_BOUND))
+               flags = I915_GEM_OBJECT_UNBIND_TEST;
 
        if (i915_gem_object_unbind(obj, flags) == 0)
                __i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
                            i915_gem_object_is_framebuffer(obj))
                                continue;
 
-                       if (!(shrink & I915_SHRINK_BOUND) &&
-                           atomic_read(&obj->bind_count))
-                               continue;
-
                        if (!can_release_pages(obj))
                                continue;
 
index 5557dfa83a7bf9b861f69dd3c618a37c80bedf48..dc250278bd2ca90b62ccd57caffc424d6b33ff2f 100644 (file)
@@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
        mutex_init(&i915->mm.stolen_lock);
 
        if (intel_vgpu_active(i915)) {
-               dev_notice(i915->drm.dev,
+               drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "iGVT-g active");
                return 0;
        }
 
        if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
-               dev_notice(i915->drm.dev,
+               drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "DMAR active");
                return 0;
index fa16f2c3f3ac538b4b982eb51d4471ccbe83675b..2b46c6530da9eb3b55ed42b6d97351b47165905d 100644 (file)
@@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops huge_ops = {
-       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-                I915_GEM_OBJECT_IS_SHRINKABLE,
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = huge_get_pages,
        .put_pages = huge_put_pages,
 };
index d4f94ca9ae0dd5fe6258b6cc354f636650f4b8b2..c9988b6d5c8898ad6fd6ae2625f35d22cbde93bb 100644 (file)
@@ -421,7 +421,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
 
                        err = i915_vma_pin(vma, 0, 0, PIN_USER);
                        if (err)
-                               goto out_close;
+                               goto out_put;
 
                        err = igt_check_page_sizes(vma);
 
@@ -432,8 +432,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
                        }
 
                        i915_vma_unpin(vma);
-                       i915_vma_close(vma);
-
                        i915_gem_object_put(obj);
 
                        if (err)
@@ -443,8 +441,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
 
        goto out_device;
 
-out_close:
-       i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
 out_device:
@@ -492,7 +488,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 
                        err = i915_vma_pin(vma, 0, 0, PIN_USER);
                        if (err)
-                               goto out_close;
+                               goto out_put;
 
                        err = igt_check_page_sizes(vma);
                        if (err)
@@ -515,8 +511,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
                        }
 
                        i915_vma_unpin(vma);
-                       i915_vma_close(vma);
-
                        __i915_gem_object_put_pages(obj);
                        i915_gem_object_put(obj);
                }
@@ -526,8 +520,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 
 out_unpin:
        i915_vma_unpin(vma);
-out_close:
-       i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
 out_region:
@@ -587,10 +579,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                }
 
                err = i915_vma_pin(vma, 0, 0, flags);
-               if (err) {
-                       i915_vma_close(vma);
+               if (err)
                        goto out_unpin;
-               }
 
 
                err = igt_check_page_sizes(vma);
@@ -603,10 +593,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 
                i915_vma_unpin(vma);
 
-               if (err) {
-                       i915_vma_close(vma);
+               if (err)
                        goto out_unpin;
-               }
 
                /*
                 * Try all the other valid offsets until the next
@@ -615,16 +603,12 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                 */
                for (offset = 4096; offset < page_size; offset += 4096) {
                        err = i915_vma_unbind(vma);
-                       if (err) {
-                               i915_vma_close(vma);
+                       if (err)
                                goto out_unpin;
-                       }
 
                        err = i915_vma_pin(vma, 0, 0, flags | offset);
-                       if (err) {
-                               i915_vma_close(vma);
+                       if (err)
                                goto out_unpin;
-                       }
 
                        err = igt_check_page_sizes(vma);
 
@@ -636,10 +620,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 
                        i915_vma_unpin(vma);
 
-                       if (err) {
-                               i915_vma_close(vma);
+                       if (err)
                                goto out_unpin;
-                       }
 
                        if (igt_timeout(end_time,
                                        "%s timed out at offset %x with page-size %x\n",
@@ -647,8 +629,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                                break;
                }
 
-               i915_vma_close(vma);
-
                i915_gem_object_unpin_pages(obj);
                __i915_gem_object_put_pages(obj);
                i915_gem_object_put(obj);
@@ -670,12 +650,6 @@ static void close_object_list(struct list_head *objects,
        struct drm_i915_gem_object *obj, *on;
 
        list_for_each_entry_safe(obj, on, objects, st_link) {
-               struct i915_vma *vma;
-
-               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
-               if (!IS_ERR(vma))
-                       i915_vma_close(vma);
-
                list_del(&obj->st_link);
                i915_gem_object_unpin_pages(obj);
                __i915_gem_object_put_pages(obj);
@@ -912,7 +886,7 @@ static int igt_mock_ppgtt_64K(void *arg)
 
                        err = i915_vma_pin(vma, 0, 0, flags);
                        if (err)
-                               goto out_vma_close;
+                               goto out_object_unpin;
 
                        err = igt_check_page_sizes(vma);
                        if (err)
@@ -945,8 +919,6 @@ static int igt_mock_ppgtt_64K(void *arg)
                        }
 
                        i915_vma_unpin(vma);
-                       i915_vma_close(vma);
-
                        i915_gem_object_unpin_pages(obj);
                        __i915_gem_object_put_pages(obj);
                        i915_gem_object_put(obj);
@@ -957,8 +929,6 @@ static int igt_mock_ppgtt_64K(void *arg)
 
 out_vma_unpin:
        i915_vma_unpin(vma);
-out_vma_close:
-       i915_vma_close(vma);
 out_object_unpin:
        i915_gem_object_unpin_pages(obj);
 out_object_put:
@@ -1070,7 +1040,7 @@ static int __igt_write_huge(struct intel_context *ce,
 
        err = i915_vma_unbind(vma);
        if (err)
-               goto out_vma_close;
+               return err;
 
        err = i915_vma_pin(vma, size, 0, flags | offset);
        if (err) {
@@ -1081,7 +1051,7 @@ static int __igt_write_huge(struct intel_context *ce,
                if (err == -ENOSPC && i915_is_ggtt(ce->vm))
                        err = 0;
 
-               goto out_vma_close;
+               return err;
        }
 
        err = igt_check_page_sizes(vma);
@@ -1102,8 +1072,6 @@ static int __igt_write_huge(struct intel_context *ce,
 
 out_vma_unpin:
        i915_vma_unpin(vma);
-out_vma_close:
-       __i915_vma_put(vma);
        return err;
 }
 
@@ -1490,7 +1458,7 @@ static int igt_ppgtt_pin_update(void *arg)
 
                err = i915_vma_pin(vma, SZ_2M, 0, flags);
                if (err)
-                       goto out_close;
+                       goto out_put;
 
                if (vma->page_sizes.sg < page_size) {
                        pr_info("Unable to allocate page-size %x, finishing test early\n",
@@ -1527,8 +1495,6 @@ static int igt_ppgtt_pin_update(void *arg)
                        goto out_unpin;
 
                i915_vma_unpin(vma);
-               i915_vma_close(vma);
-
                i915_gem_object_put(obj);
        }
 
@@ -1546,7 +1512,7 @@ static int igt_ppgtt_pin_update(void *arg)
 
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
-               goto out_close;
+               goto out_put;
 
        /*
         * Make sure we don't end up with something like where the pde is still
@@ -1576,8 +1542,6 @@ static int igt_ppgtt_pin_update(void *arg)
 
 out_unpin:
        i915_vma_unpin(vma);
-out_close:
-       i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
 out_vm:
@@ -1629,13 +1593,11 @@ static int igt_tmpfs_fallback(void *arg)
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               goto out_close;
+               goto out_put;
 
        err = igt_check_page_sizes(vma);
 
        i915_vma_unpin(vma);
-out_close:
-       i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
 out_restore:
@@ -1682,7 +1644,7 @@ static int igt_shrink_thp(void *arg)
 
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
-               goto out_close;
+               goto out_put;
 
        if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
                pr_info("failed to allocate THP, finishing test early\n");
@@ -1706,7 +1668,7 @@ static int igt_shrink_thp(void *arg)
        i915_gem_context_unlock_engines(ctx);
        i915_vma_unpin(vma);
        if (err)
-               goto out_close;
+               goto out_put;
 
        /*
         * Now that the pages are *unpinned* shrink-all should invoke
@@ -1716,18 +1678,18 @@ static int igt_shrink_thp(void *arg)
        if (i915_gem_object_has_pages(obj)) {
                pr_err("shrink-all didn't truncate the pages\n");
                err = -EINVAL;
-               goto out_close;
+               goto out_put;
        }
 
        if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
                pr_err("residual page-size bits left\n");
                err = -EINVAL;
-               goto out_close;
+               goto out_put;
        }
 
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
-               goto out_close;
+               goto out_put;
 
        while (n--) {
                err = cpu_check(obj, n, 0xdeadbeaf);
@@ -1737,8 +1699,6 @@ static int igt_shrink_thp(void *arg)
 
 out_unpin:
        i915_vma_unpin(vma);
-out_close:
-       i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
 out_vm:
@@ -1777,21 +1737,20 @@ int i915_gem_huge_page_mock_selftests(void)
        if (!i915_vm_is_4lvl(&ppgtt->vm)) {
                pr_err("failed to create 48b PPGTT\n");
                err = -EINVAL;
-               goto out_close;
+               goto out_put;
        }
 
        /* If we were ever hit this then it's time to mock the 64K scratch */
        if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
                pr_err("PPGTT missing 64K scratch page\n");
                err = -EINVAL;
-               goto out_close;
+               goto out_put;
        }
 
        err = i915_subtests(tests, ppgtt);
 
-out_close:
+out_put:
        i915_vm_put(&ppgtt->vm);
-
 out_unlock:
        drm_dev_put(&dev_priv->drm);
        return err;
index b972be165e85c3dd88fe614475b1eba779decd9e..8fe3ad2ee34e6458011b897f4ff863c3c3e0196b 100644 (file)
@@ -7,9 +7,12 @@
 
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gpu_commands.h"
+#include "gem/i915_gem_lmem.h"
 
 #include "selftests/igt_flush_test.h"
 #include "selftests/mock_drm.h"
+#include "selftests/i915_random.h"
 #include "huge_gem_object.h"
 #include "mock_context.h"
 
@@ -127,10 +130,573 @@ static int igt_client_fill(void *arg)
        } while (1);
 }
 
+#define WIDTH 512
+#define HEIGHT 32
+
+struct blit_buffer {
+       struct i915_vma *vma;
+       u32 start_val;
+       u32 tiling;
+};
+
+struct tiled_blits {
+       struct intel_context *ce;
+       struct blit_buffer buffers[3];
+       struct blit_buffer scratch;
+       struct i915_vma *batch;
+       u64 hole;
+       u32 width;
+       u32 height;
+};
+
+static int prepare_blit(const struct tiled_blits *t,
+                       struct blit_buffer *dst,
+                       struct blit_buffer *src,
+                       struct drm_i915_gem_object *batch)
+{
+       const int gen = INTEL_GEN(to_i915(batch->base.dev));
+       bool use_64b_reloc = gen >= 8;
+       u32 src_pitch, dst_pitch;
+       u32 cmd, *cs;
+
+       cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
+       cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
+       if (src->tiling == I915_TILING_Y)
+               cmd |= BCS_SRC_Y;
+       if (dst->tiling == I915_TILING_Y)
+               cmd |= BCS_DST_Y;
+       *cs++ = cmd;
+
+       cmd = MI_FLUSH_DW;
+       if (gen >= 8)
+               cmd++;
+       *cs++ = cmd;
+       *cs++ = 0;
+       *cs++ = 0;
+       *cs++ = 0;
+
+       cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
+       if (gen >= 8)
+               cmd += 2;
+
+       src_pitch = t->width * 4;
+       if (src->tiling) {
+               cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+               src_pitch /= 4;
+       }
+
+       dst_pitch = t->width * 4;
+       if (dst->tiling) {
+               cmd |= XY_SRC_COPY_BLT_DST_TILED;
+               dst_pitch /= 4;
+       }
+
+       *cs++ = cmd;
+       *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
+       *cs++ = 0;
+       *cs++ = t->height << 16 | t->width;
+       *cs++ = lower_32_bits(dst->vma->node.start);
+       if (use_64b_reloc)
+               *cs++ = upper_32_bits(dst->vma->node.start);
+       *cs++ = 0;
+       *cs++ = src_pitch;
+       *cs++ = lower_32_bits(src->vma->node.start);
+       if (use_64b_reloc)
+               *cs++ = upper_32_bits(src->vma->node.start);
+
+       *cs++ = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_flush_map(batch);
+       i915_gem_object_unpin_map(batch);
+
+       return 0;
+}
+
+static void tiled_blits_destroy_buffers(struct tiled_blits *t)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
+               i915_vma_put(t->buffers[i].vma);
+
+       i915_vma_put(t->scratch.vma);
+       i915_vma_put(t->batch);
+}
+
+static struct i915_vma *
+__create_vma(struct tiled_blits *t, size_t size, bool lmem)
+{
+       struct drm_i915_private *i915 = t->ce->vm->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+
+       if (lmem)
+               obj = i915_gem_object_create_lmem(i915, size, 0);
+       else
+               obj = i915_gem_object_create_shmem(i915, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       vma = i915_vma_instance(obj, t->ce->vm, NULL);
+       if (IS_ERR(vma))
+               i915_gem_object_put(obj);
+
+       return vma;
+}
+
+static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
+{
+       return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
+}
+
+static int tiled_blits_create_buffers(struct tiled_blits *t,
+                                     int width, int height,
+                                     struct rnd_state *prng)
+{
+       struct drm_i915_private *i915 = t->ce->engine->i915;
+       int i;
+
+       t->width = width;
+       t->height = height;
+
+       t->batch = __create_vma(t, PAGE_SIZE, false);
+       if (IS_ERR(t->batch))
+               return PTR_ERR(t->batch);
+
+       t->scratch.vma = create_vma(t, false);
+       if (IS_ERR(t->scratch.vma)) {
+               i915_vma_put(t->batch);
+               return PTR_ERR(t->scratch.vma);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+               struct i915_vma *vma;
+
+               vma = create_vma(t, HAS_LMEM(i915) && i % 2);
+               if (IS_ERR(vma)) {
+                       tiled_blits_destroy_buffers(t);
+                       return PTR_ERR(vma);
+               }
+
+               t->buffers[i].vma = vma;
+               t->buffers[i].tiling =
+                       i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+       }
+
+       return 0;
+}
+
+static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
+{
+       int i;
+
+       t->scratch.start_val = val;
+       for (i = 0; i < t->width * t->height; i++)
+               vaddr[i] = val++;
+
+       i915_gem_object_flush_map(t->scratch.vma->obj);
+}
+
+static u64 swizzle_bit(unsigned int bit, u64 offset)
+{
+       return (offset & BIT_ULL(bit)) >> (bit - 6);
+}
+
+static u64 tiled_offset(const struct intel_gt *gt,
+                       u64 v,
+                       unsigned int stride,
+                       unsigned int tiling)
+{
+       unsigned int swizzle;
+       u64 x, y;
+
+       if (tiling == I915_TILING_NONE)
+               return v;
+
+       y = div64_u64_rem(v, stride, &x);
+
+       if (tiling == I915_TILING_X) {
+               v = div64_u64_rem(y, 8, &y) * stride * 8;
+               v += y * 512;
+               v += div64_u64_rem(x, 512, &x) << 12;
+               v += x;
+
+               swizzle = gt->ggtt->bit_6_swizzle_x;
+       } else {
+               const unsigned int ytile_span = 16;
+               const unsigned int ytile_height = 512;
+
+               v = div64_u64_rem(y, 32, &y) * stride * 32;
+               v += y * ytile_span;
+               v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+               v += x;
+
+               swizzle = gt->ggtt->bit_6_swizzle_y;
+       }
+
+       switch (swizzle) {
+       case I915_BIT_6_SWIZZLE_9:
+               v ^= swizzle_bit(9, v);
+               break;
+       case I915_BIT_6_SWIZZLE_9_10:
+               v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
+               break;
+       case I915_BIT_6_SWIZZLE_9_11:
+               v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
+               break;
+       case I915_BIT_6_SWIZZLE_9_10_11:
+               v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
+               break;
+       }
+
+       return v;
+}
+
+static const char *repr_tiling(int tiling)
+{
+       switch (tiling) {
+       case I915_TILING_NONE: return "linear";
+       case I915_TILING_X: return "X";
+       case I915_TILING_Y: return "Y";
+       default: return "unknown";
+       }
+}
+
+static int verify_buffer(const struct tiled_blits *t,
+                        struct blit_buffer *buf,
+                        struct rnd_state *prng)
+{
+       const u32 *vaddr;
+       int ret = 0;
+       int x, y, p;
+
+       x = i915_prandom_u32_max_state(t->width, prng);
+       y = i915_prandom_u32_max_state(t->height, prng);
+       p = y * t->width + x;
+
+       vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       if (vaddr[0] != buf->start_val) {
+               ret = -EINVAL;
+       } else {
+               u64 v = tiled_offset(buf->vma->vm->gt,
+                                    p * 4, t->width * 4,
+                                    buf->tiling);
+
+               if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
+                       ret = -EINVAL;
+       }
+       if (ret) {
+               pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
+                      repr_tiling(buf->tiling),
+                      x, y, buf->start_val);
+               igt_hexdump(vaddr, 4096);
+       }
+
+       i915_gem_object_unpin_map(buf->vma->obj);
+       return ret;
+}
+
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
+{
+       int err;
+
+       i915_vma_lock(vma);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, flags);
+       i915_vma_unlock(vma);
+
+       return err;
+}
+
+static int pin_buffer(struct i915_vma *vma, u64 addr)
+{
+       int err;
+
+       if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+               err = i915_vma_unbind(vma);
+               if (err)
+                       return err;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int
+tiled_blit(struct tiled_blits *t,
+          struct blit_buffer *dst, u64 dst_addr,
+          struct blit_buffer *src, u64 src_addr)
+{
+       struct i915_request *rq;
+       int err;
+
+       err = pin_buffer(src->vma, src_addr);
+       if (err) {
+               pr_err("Cannot pin src @ %llx\n", src_addr);
+               return err;
+       }
+
+       err = pin_buffer(dst->vma, dst_addr);
+       if (err) {
+               pr_err("Cannot pin dst @ %llx\n", dst_addr);
+               goto err_src;
+       }
+
+       err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
+       if (err) {
+               pr_err("cannot pin batch\n");
+               goto err_dst;
+       }
+
+       err = prepare_blit(t, dst, src, t->batch->obj);
+       if (err)
+               goto err_bb;
+
+       rq = intel_context_create_request(t->ce);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_bb;
+       }
+
+       err = move_to_active(t->batch, rq, 0);
+       if (!err)
+               err = move_to_active(src->vma, rq, 0);
+       if (!err)
+               err = move_to_active(dst->vma, rq, 0);
+       if (!err)
+               err = rq->engine->emit_bb_start(rq,
+                                               t->batch->node.start,
+                                               t->batch->node.size,
+                                               0);
+       i915_request_get(rq);
+       i915_request_add(rq);
+       if (i915_request_wait(rq, 0, HZ / 2) < 0)
+               err = -ETIME;
+       i915_request_put(rq);
+
+       dst->start_val = src->start_val;
+err_bb:
+       i915_vma_unpin(t->batch);
+err_dst:
+       i915_vma_unpin(dst->vma);
+err_src:
+       i915_vma_unpin(src->vma);
+       return err;
+}
+
+static struct tiled_blits *
+tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+       struct drm_mm_node hole;
+       struct tiled_blits *t;
+       u64 hole_size;
+       int err;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               return ERR_PTR(-ENOMEM);
+
+       t->ce = intel_context_create(engine);
+       if (IS_ERR(t->ce)) {
+               err = PTR_ERR(t->ce);
+               goto err_free;
+       }
+
+       hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4);
+       hole_size *= 2; /* room to maneuver */
+       hole_size += 2 * I915_GTT_MIN_ALIGNMENT;
+
+       mutex_lock(&t->ce->vm->mutex);
+       memset(&hole, 0, sizeof(hole));
+       err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
+                                         hole_size, 0, I915_COLOR_UNEVICTABLE,
+                                         0, U64_MAX,
+                                         DRM_MM_INSERT_BEST);
+       if (!err)
+               drm_mm_remove_node(&hole);
+       mutex_unlock(&t->ce->vm->mutex);
+       if (err) {
+               err = -ENODEV;
+               goto err_put;
+       }
+
+       t->hole = hole.start + I915_GTT_MIN_ALIGNMENT;
+       pr_info("Using hole at %llx\n", t->hole);
+
+       err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
+       if (err)
+               goto err_put;
+
+       return t;
+
+err_put:
+       intel_context_put(t->ce);
+err_free:
+       kfree(t);
+       return ERR_PTR(err);
+}
+
+static void tiled_blits_destroy(struct tiled_blits *t)
+{
+       tiled_blits_destroy_buffers(t);
+
+       intel_context_put(t->ce);
+       kfree(t);
+}
+
+static int tiled_blits_prepare(struct tiled_blits *t,
+                              struct rnd_state *prng)
+{
+       u64 offset = PAGE_ALIGN(t->width * t->height * 4);
+       u32 *map;
+       int err;
+       int i;
+
+       map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       /* Use scratch to fill objects */
+       for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+               fill_scratch(t, map, prandom_u32_state(prng));
+               GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));
+
+               err = tiled_blit(t,
+                                &t->buffers[i], t->hole + offset,
+                                &t->scratch, t->hole);
+               if (err == 0)
+                       err = verify_buffer(t, &t->buffers[i], prng);
+               if (err) {
+                       pr_err("Failed to create buffer %d\n", i);
+                       break;
+               }
+       }
+
+       i915_gem_object_unpin_map(t->scratch.vma->obj);
+       return err;
+}
+
+static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
+{
+       u64 offset =
+               round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT);
+       int err;
+
+       /* We want to check position-invariant tiling across GTT eviction */
+
+       err = tiled_blit(t,
+                        &t->buffers[1], t->hole + offset / 2,
+                        &t->buffers[0], t->hole + 2 * offset);
+       if (err)
+               return err;
+
+       /* Reposition so that we overlap the old addresses, and slightly off */
+       err = tiled_blit(t,
+                        &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT,
+                        &t->buffers[1], t->hole + 3 * offset / 2);
+       if (err)
+               return err;
+
+       err = verify_buffer(t, &t->buffers[2], prng);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
+                                   struct rnd_state *prng)
+{
+       struct tiled_blits *t;
+       int err;
+
+       t = tiled_blits_create(engine, prng);
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
+       err = tiled_blits_prepare(t, prng);
+       if (err)
+               goto out;
+
+       err = tiled_blits_bounce(t, prng);
+       if (err)
+               goto out;
+
+out:
+       tiled_blits_destroy(t);
+       return err;
+}
+
+static bool has_bit17_swizzle(int sw)
+{
+       return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
+               sw == I915_BIT_6_SWIZZLE_9_17);
+}
+
+static bool bad_swizzling(struct drm_i915_private *i915)
+{
+       struct i915_ggtt *ggtt = &i915->ggtt;
+
+       if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               return true;
+
+       if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
+           has_bit17_swizzle(ggtt->bit_6_swizzle_y))
+               return true;
+
+       return false;
+}
+
+static int igt_client_tiled_blits(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       I915_RND_STATE(prng);
+       int inst = 0;
+
+       /* Test requires explicit BLT tiling controls */
+       if (INTEL_GEN(i915) < 4)
+               return 0;
+
+       if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
+               return 0;
+
+       do {
+               struct intel_engine_cs *engine;
+               int err;
+
+               engine = intel_engine_lookup_user(i915,
+                                                 I915_ENGINE_CLASS_COPY,
+                                                 inst++);
+               if (!engine)
+                       return 0;
+
+               err = __igt_client_tiled_blits(engine, &prng);
+               if (err == -ENODEV)
+                       err = 0;
+               if (err)
+                       return err;
+       } while (1);
+}
+
 int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_client_fill),
+               SUBTEST(igt_client_tiled_blits),
        };
 
        if (intel_gt_is_wedged(&i915->gt))
index 3f6079e1dfb6d48129d8cf4089f942f29e346657..87d7d8aa080f7dcf6203d42d743e821414d1d38e 100644 (file)
@@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
                return PTR_ERR(map);
 
        map[offset / sizeof(*map)] = v;
+
+       __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
        i915_gem_object_unpin_map(ctx->obj);
 
        return 0;
index 54b86cf7f5d2ea1988a5435b345798ad3f47eeb6..b8197889064141b366de1accd2b387d773eb9bfa 100644 (file)
@@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
                goto err_batch;
        }
 
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       0);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
@@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = rq->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       0);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin_and_release(&batch, 0);
        i915_vma_unpin(vma);
 
@@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_batch:
        i915_vma_unpin_and_release(&batch, 0);
@@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, false);
        if (err == 0)
@@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin(vma);
 
        i915_request_add(rq);
@@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
        goto out_vm;
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
@@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
@@ -1686,8 +1692,17 @@ static int read_from_scratch(struct i915_gem_context *ctx,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin(vma);
-       i915_vma_close(vma);
 
        i915_request_add(rq);
 
@@ -1709,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
        goto out_vm;
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
@@ -1925,7 +1939,7 @@ static int mock_context_barrier(void *arg)
                goto out;
        }
 
-       rq = igt_request_alloc(ctx, i915->engine[RCS0]);
+       rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
        if (IS_ERR(rq)) {
                pr_err("Request allocation failed!\n");
                goto out;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
new file mode 100644 (file)
index 0000000..a49016f
--- /dev/null
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gt/intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static u64 read_reloc(const u32 *map, int x, const u64 mask)
+{
+       u64 reloc;
+
+       memcpy(&reloc, &map[x], sizeof(reloc));
+       return reloc & mask;
+}
+
+static int __igt_gpu_reloc(struct i915_execbuffer *eb,
+                          struct drm_i915_gem_object *obj)
+{
+       const unsigned int offsets[] = { 8, 3, 0 };
+       const u64 mask =
+               GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
+       const u32 *map = page_mask_bits(obj->mm.mapping);
+       struct i915_request *rq;
+       struct i915_vma *vma;
+       int err;
+       int i;
+
+       vma = i915_vma_instance(obj, eb->context->vm, NULL);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+       if (err)
+               return err;
+
+       /* 8-Byte aligned */
+       if (!__reloc_entry_gpu(eb, vma,
+                              offsets[0] * sizeof(u32),
+                              0)) {
+               err = -EIO;
+               goto unpin_vma;
+       }
+
+       /* !8-Byte aligned */
+       if (!__reloc_entry_gpu(eb, vma,
+                              offsets[1] * sizeof(u32),
+                              1)) {
+               err = -EIO;
+               goto unpin_vma;
+       }
+
+       /* Skip to the end of the cmd page */
+       i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
+       i -= eb->reloc_cache.rq_size;
+       memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
+                MI_NOOP, i);
+       eb->reloc_cache.rq_size += i;
+
+       /* Force batch chaining */
+       if (!__reloc_entry_gpu(eb, vma,
+                              offsets[2] * sizeof(u32),
+                              2)) {
+               err = -EIO;
+               goto unpin_vma;
+       }
+
+       GEM_BUG_ON(!eb->reloc_cache.rq);
+       rq = i915_request_get(eb->reloc_cache.rq);
+       err = reloc_gpu_flush(&eb->reloc_cache);
+       if (err)
+               goto put_rq;
+       GEM_BUG_ON(eb->reloc_cache.rq);
+
+       err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
+       if (err) {
+               intel_gt_set_wedged(eb->engine->gt);
+               goto put_rq;
+       }
+
+       if (!i915_request_completed(rq)) {
+               pr_err("%s: did not wait for relocations!\n", eb->engine->name);
+               err = -EINVAL;
+               goto put_rq;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(offsets); i++) {
+               u64 reloc = read_reloc(map, offsets[i], mask);
+
+               if (reloc != i) {
+                       pr_err("%s[%d]: map[%d] %llx != %x\n",
+                              eb->engine->name, i, offsets[i], reloc, i);
+                       err = -EINVAL;
+               }
+       }
+       if (err)
+               igt_hexdump(map, 4096);
+
+put_rq:
+       i915_request_put(rq);
+unpin_vma:
+       i915_vma_unpin(vma);
+       return err;
+}
+
+static int igt_gpu_reloc(void *arg)
+{
+       struct i915_execbuffer eb;
+       struct drm_i915_gem_object *scratch;
+       int err = 0;
+       u32 *map;
+
+       eb.i915 = arg;
+
+       scratch = i915_gem_object_create_internal(eb.i915, 4096);
+       if (IS_ERR(scratch))
+               return PTR_ERR(scratch);
+
+       map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
+       if (IS_ERR(map)) {
+               err = PTR_ERR(map);
+               goto err_scratch;
+       }
+
+       for_each_uabi_engine(eb.engine, eb.i915) {
+               reloc_cache_init(&eb.reloc_cache, eb.i915);
+               memset(map, POISON_INUSE, 4096);
+
+               intel_engine_pm_get(eb.engine);
+               eb.context = intel_context_create(eb.engine);
+               if (IS_ERR(eb.context)) {
+                       err = PTR_ERR(eb.context);
+                       goto err_pm;
+               }
+
+               err = intel_context_pin(eb.context);
+               if (err)
+                       goto err_put;
+
+               err = __igt_gpu_reloc(&eb, scratch);
+
+               intel_context_unpin(eb.context);
+err_put:
+               intel_context_put(eb.context);
+err_pm:
+               intel_engine_pm_put(eb.engine);
+               if (err)
+                       break;
+       }
+
+       if (igt_flush_test(eb.i915))
+               err = -EIO;
+
+err_scratch:
+       i915_gem_object_put(scratch);
+       return err;
+}
+
+int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_gpu_reloc),
+       };
+
+       if (intel_gt_is_wedged(&i915->gt))
+               return 0;
+
+       return i915_live_subtests(tests, i915);
+}
index 43912e9b683dccb1a43dae9f5a31890baedd831e..9c7402ce5bf9c51a855ffc14b23815830c5b6c74 100644 (file)
@@ -952,6 +952,129 @@ static int igt_mmap(void *arg)
        return 0;
 }
 
+static const char *repr_mmap_type(enum i915_mmap_type type)
+{
+       switch (type) {
+       case I915_MMAP_TYPE_GTT: return "gtt";
+       case I915_MMAP_TYPE_WB: return "wb";
+       case I915_MMAP_TYPE_WC: return "wc";
+       case I915_MMAP_TYPE_UC: return "uc";
+       default: return "unknown";
+       }
+}
+
+static bool can_access(const struct drm_i915_gem_object *obj)
+{
+       unsigned int flags =
+               I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+
+       return i915_gem_object_type_has(obj, flags);
+}
+
+static int __igt_mmap_access(struct drm_i915_private *i915,
+                            struct drm_i915_gem_object *obj,
+                            enum i915_mmap_type type)
+{
+       struct i915_mmap_offset *mmo;
+       unsigned long __user *ptr;
+       unsigned long A, B;
+       unsigned long x, y;
+       unsigned long addr;
+       int err;
+
+       memset(&A, 0xAA, sizeof(A));
+       memset(&B, 0xBB, sizeof(B));
+
+       if (!can_mmap(obj, type) || !can_access(obj))
+               return 0;
+
+       mmo = mmap_offset_attach(obj, type, NULL);
+       if (IS_ERR(mmo))
+               return PTR_ERR(mmo);
+
+       addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+       if (IS_ERR_VALUE(addr))
+               return addr;
+       ptr = (unsigned long __user *)addr;
+
+       err = __put_user(A, ptr);
+       if (err) {
+               pr_err("%s(%s): failed to write into user mmap\n",
+                      obj->mm.region->name, repr_mmap_type(type));
+               goto out_unmap;
+       }
+
+       intel_gt_flush_ggtt_writes(&i915->gt);
+
+       err = access_process_vm(current, addr, &x, sizeof(x), 0);
+       if (err != sizeof(x)) {
+               pr_err("%s(%s): access_process_vm() read failed\n",
+                      obj->mm.region->name, repr_mmap_type(type));
+               goto out_unmap;
+       }
+
+       err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
+       if (err != sizeof(B)) {
+               pr_err("%s(%s): access_process_vm() write failed\n",
+                      obj->mm.region->name, repr_mmap_type(type));
+               goto out_unmap;
+       }
+
+       intel_gt_flush_ggtt_writes(&i915->gt);
+
+       err = __get_user(y, ptr);
+       if (err) {
+               pr_err("%s(%s): failed to read from user mmap\n",
+                      obj->mm.region->name, repr_mmap_type(type));
+               goto out_unmap;
+       }
+
+       if (x != A || y != B) {
+               pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
+                      obj->mm.region->name, repr_mmap_type(type),
+                      x, y);
+               err = -EINVAL;
+               goto out_unmap;
+       }
+
+out_unmap:
+       vm_munmap(addr, obj->base.size);
+       return err;
+}
+
+static int igt_mmap_access(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_memory_region *mr;
+       enum intel_region_id id;
+
+       for_each_memory_region(mr, i915, id) {
+               struct drm_i915_gem_object *obj;
+               int err;
+
+               obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+               if (obj == ERR_PTR(-ENODEV))
+                       continue;
+
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
+               if (err == 0)
+                       err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
+               if (err == 0)
+                       err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
+               if (err == 0)
+                       err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
+
+               i915_gem_object_put(obj);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int __igt_mmap_gpu(struct drm_i915_private *i915,
                          struct drm_i915_gem_object *obj,
                          enum i915_mmap_type type)
@@ -1156,9 +1279,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
        if (err)
                goto out_unmap;
 
-       GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
-                  !atomic_read(&obj->bind_count));
-
        err = check_present(addr, obj->base.size);
        if (err) {
                pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1295,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
                pr_err("Failed to unbind object!\n");
                goto out_unmap;
        }
-       GEM_BUG_ON(atomic_read(&obj->bind_count));
 
        if (type != I915_MMAP_TYPE_GTT) {
                __i915_gem_object_put_pages(obj);
@@ -1233,6 +1352,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_smoke_tiling),
                SUBTEST(igt_mmap_offset_exhaustion),
                SUBTEST(igt_mmap),
+               SUBTEST(igt_mmap_access),
                SUBTEST(igt_mmap_revoke),
                SUBTEST(igt_mmap_gpu),
        };
index 2b6db6f799de4af775f7d67d9eeda453ea2f00bc..faa5b6d917954e0d75ad0c9f34b7865577fd4ec7 100644 (file)
@@ -14,7 +14,7 @@ static int igt_gem_object(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
-       int err = -ENOMEM;
+       int err;
 
        /* Basic test to ensure we can create an object */
 
index 772d8cba7da9715e3efeaa07955d08cf13dc7011..e21b5023ca7d18edda6eadf289c26e6618a6da6e 100644 (file)
@@ -83,6 +83,8 @@ igt_emit_store_dw(struct i915_vma *vma,
                offset += PAGE_SIZE;
        }
        *cmd = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        intel_gt_chipset_flush(vma->vm->gt);
@@ -126,16 +128,6 @@ int igt_gpu_fill_dw(struct intel_context *ce,
                goto err_batch;
        }
 
-       flags = 0;
-       if (INTEL_GEN(ce->vm->i915) <= 5)
-               flags |= I915_DISPATCH_SECURE;
-
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       flags);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
@@ -152,15 +144,17 @@ int igt_gpu_fill_dw(struct intel_context *ce,
        if (err)
                goto skip_request;
 
-       i915_request_add(rq);
-
-       i915_vma_unpin_and_release(&batch, 0);
+       flags = 0;
+       if (INTEL_GEN(ce->vm->i915) <= 5)
+               flags |= I915_DISPATCH_SECURE;
 
-       return 0;
+       err = rq->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       flags);
 
 skip_request:
-       i915_request_set_error_once(rq, err);
-err_request:
+       if (err)
+               i915_request_set_error_once(rq, err);
        i915_request_add(rq);
 err_batch:
        i915_vma_unpin_and_release(&batch, 0);
index 6a5e9ab20b94ec46e0a92e75c79cea3f8a6ee726..5e3725e622411f7f89e0959a17686491cf629406 100644 (file)
@@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
                { "engines", &engines_fops },
        };
 
-       debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
 }
index 75255aaacaed4e7b0ccc3ddab8f695ee505c87e2..1de5fbaa1cf9cfb1883e759b0842f5ec7410e1ad 100644 (file)
@@ -9,6 +9,7 @@
 #include "debugfs_engines.h"
 #include "debugfs_gt.h"
 #include "debugfs_gt_pm.h"
+#include "uc/intel_uc_debugfs.h"
 #include "i915_drv.h"
 
 void debugfs_gt_register(struct intel_gt *gt)
@@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt)
 
        debugfs_engines_register(gt, root);
        debugfs_gt_pm_register(gt, root);
+
+       intel_uc_debugfs_register(&gt->uc, root);
 }
 
-void debugfs_gt_register_files(struct intel_gt *gt,
-                              struct dentry *root,
-                              const struct debugfs_gt_file *files,
-                              unsigned long count)
+void intel_gt_debugfs_register_files(struct dentry *root,
+                                    const struct debugfs_gt_file *files,
+                                    unsigned long count, void *data)
 {
        while (count--) {
-               if (!files->eval || files->eval(gt))
+               umode_t mode = files->fops->write ? 0644 : 0444;
+               if (!files->eval || files->eval(data))
                        debugfs_create_file(files->name,
-                                           0444, root, gt,
+                                           mode, root, data,
                                            files->fops);
 
                files++;
index 4ea0f06cda8f9bb752d77b7129a515b80e1334ff..f77540f727e94d197e5c293267fef0dd2dbade31 100644 (file)
@@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt);
 struct debugfs_gt_file {
        const char *name;
        const struct file_operations *fops;
-       bool (*eval)(const struct intel_gt *gt);
+       bool (*eval)(void *data);
 };
 
-void debugfs_gt_register_files(struct intel_gt *gt,
-                              struct dentry *root,
-                              const struct debugfs_gt_file *files,
-                              unsigned long count);
+void intel_gt_debugfs_register_files(struct dentry *root,
+                                    const struct debugfs_gt_file *files,
+                                    unsigned long count, void *data);
 
 #endif /* DEBUGFS_GT_H */
index 059c9e5c002e8180c040caf1f9dc846ab6e1114d..174a245533223d1d065173f0e592b80d629ec774 100644 (file)
@@ -10,6 +10,7 @@
 #include "debugfs_gt_pm.h"
 #include "i915_drv.h"
 #include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
 #include "intel_llc.h"
 #include "intel_rc6.h"
 #include "intel_rps.h"
@@ -268,7 +269,7 @@ static int frequency_show(struct seq_file *m, void *unused)
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
-                                 GEN6_RP_MEDIA_SW_MODE));
+                                GEN6_RP_MEDIA_SW_MODE));
 
                vlv_punit_get(i915);
                freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
@@ -300,8 +301,9 @@ static int frequency_show(struct seq_file *m, void *unused)
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
-               u32 rpupei, rpcurup, rpprevup;
-               u32 rpdownei, rpcurdown, rpprevdown;
+               u32 rpcurupei, rpcurup, rpprevup;
+               u32 rpcurdownei, rpcurdown, rpprevdown;
+               u32 rpupei, rpupt, rpdownei, rpdownt;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;
 
@@ -334,12 +336,19 @@ static int frequency_show(struct seq_file *m, void *unused)
                rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
 
                rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
-               rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+               rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
-               rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+               rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+
+               rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+               rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+
+               rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+               rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
                cagf = intel_rps_read_actual_frequency(rps);
 
                intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
@@ -372,7 +381,7 @@ static int frequency_show(struct seq_file *m, void *unused)
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
-                                 GEN6_RP_MEDIA_SW_MODE));
+                                GEN6_RP_MEDIA_SW_MODE));
 
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
@@ -394,23 +403,35 @@ static int frequency_show(struct seq_file *m, void *unused)
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
-               seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
-                          rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
-               seq_printf(m, "RP CUR UP: %d (%dus)\n",
-                          rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
-               seq_printf(m, "RP PREV UP: %d (%dus)\n",
-                          rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+               seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+                          rpcurupei,
+                          intel_gt_pm_interval_to_ns(gt, rpcurupei));
+               seq_printf(m, "RP CUR UP: %d (%dns)\n",
+                          rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
+               seq_printf(m, "RP PREV UP: %d (%dns)\n",
+                          rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);
-
-               seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
-                          rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
-               seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
-                          rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
-               seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
-                          rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+               seq_printf(m, "RP UP EI: %d (%dns)\n",
+                          rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
+               seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+                          rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
+
+               seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+                          rpcurdownei,
+                          intel_gt_pm_interval_to_ns(gt, rpcurdownei));
+               seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+                          rpcurdown,
+                          intel_gt_pm_interval_to_ns(gt, rpcurdown));
+               seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+                          rpprevdown,
+                          intel_gt_pm_interval_to_ns(gt, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);
+               seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+                          rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
+               seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+                          rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
 
                max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
@@ -506,8 +527,10 @@ static int llc_show(struct seq_file *m, void *data)
        return 0;
 }
 
-static bool llc_eval(const struct intel_gt *gt)
+static bool llc_eval(void *data)
 {
+       struct intel_gt *gt = data;
+
        return HAS_LLC(gt->i915);
 }
 
@@ -533,7 +556,8 @@ static int rps_boost_show(struct seq_file *m, void *data)
        struct drm_i915_private *i915 = gt->i915;
        struct intel_rps *rps = &gt->rps;
 
-       seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+       seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+       seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
        seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
@@ -553,7 +577,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
 
        seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
 
-       if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+       if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
                struct intel_uncore *uncore = gt->uncore;
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;
@@ -580,8 +604,10 @@ static int rps_boost_show(struct seq_file *m, void *data)
        return 0;
 }
 
-static bool rps_eval(const struct intel_gt *gt)
+static bool rps_eval(void *data)
 {
+       struct intel_gt *gt = data;
+
        return HAS_RPS(gt->i915);
 }
 
@@ -597,5 +623,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
                { "rps_boost", &rps_boost_fops, rps_eval },
        };
 
-       debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
 }
index 94e746af8926b8c4c0d8b0ffbf90b508758c51e8..699125928272010e1f60359dbd9d1ae0c6d28142 100644 (file)
@@ -389,6 +389,16 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
        return err;
 }
 
+static __always_inline void
+write_pte(gen8_pte_t *pte, const gen8_pte_t val)
+{
+       /* Magic delays? Or can we refine these to flush all in one pass? */
+       *pte = val;
+       wmb(); /* cpu to cache */
+       clflush(pte); /* cache to memory */
+       wmb(); /* visible to all */
+}
+
 static __always_inline u64
 gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
                      struct i915_page_directory *pdp,
@@ -405,7 +415,8 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
                GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
-               vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+               write_pte(&vaddr[gen8_pd_index(idx, 0)],
+                         pte_encode | iter->dma);
 
                iter->dma += I915_GTT_PAGE_SIZE;
                if (iter->dma >= iter->max) {
@@ -487,7 +498,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 
                do {
                        GEM_BUG_ON(iter->sg->length < page_size);
-                       vaddr[index++] = encode | iter->dma;
+                       write_pte(&vaddr[index++], encode | iter->dma);
 
                        start += page_size;
                        iter->dma += page_size;
index cbad7fe722cebb840f50c46ad73f9f1c63c49c78..d907d538176e3ce93e651c7f2def2723a6b2c13b 100644 (file)
@@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
        if (!--b->irq_enabled)
                irq_disable(engine);
 
-       b->irq_armed = false;
+       WRITE_ONCE(b->irq_armed, false);
        intel_gt_pm_put_async(engine->gt);
 }
 
@@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        unsigned long flags;
 
-       if (!b->irq_armed)
+       if (!READ_ONCE(b->irq_armed))
                return;
 
        spin_lock_irqsave(&b->irq_lock, flags);
@@ -142,6 +142,18 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
        intel_engine_add_retire(engine, tl);
 }
 
+static void __signal_request(struct i915_request *rq, struct list_head *signals)
+{
+       GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+       clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+       if (!__dma_fence_signal(&rq->fence))
+               return;
+
+       i915_request_get(rq);
+       list_add_tail(&rq->signal_link, signals);
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
        struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
@@ -155,6 +167,8 @@ static void signal_irq_work(struct irq_work *work)
        if (b->irq_armed && list_empty(&b->signalers))
                __intel_breadcrumbs_disarm_irq(b);
 
+       list_splice_init(&b->signaled_requests, &signal);
+
        list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
                GEM_BUG_ON(list_empty(&ce->signals));
 
@@ -163,24 +177,15 @@ static void signal_irq_work(struct irq_work *work)
                                list_entry(pos, typeof(*rq), signal_link);
 
                        GEM_BUG_ON(!check_signal_order(ce, rq));
-
                        if (!__request_completed(rq))
                                break;
 
-                       GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
-                                            &rq->fence.flags));
-                       clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
-                       if (!__dma_fence_signal(&rq->fence))
-                               continue;
-
                        /*
                         * Queue for execution after dropping the signaling
                         * spinlock as the callback chain may end up adding
                         * more signalers to the same context or engine.
                         */
-                       i915_request_get(rq);
-                       list_add_tail(&rq->signal_link, &signal);
+                       __signal_request(rq, &signal);
                }
 
                /*
@@ -233,7 +238,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
         * which we can add a new waiter and avoid the cost of re-enabling
         * the irq.
         */
-       b->irq_armed = true;
+       WRITE_ONCE(b->irq_armed, true);
 
        /*
         * Since we are waiting on a request, the GPU should be busy
@@ -255,6 +260,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 
        spin_lock_init(&b->irq_lock);
        INIT_LIST_HEAD(&b->signalers);
+       INIT_LIST_HEAD(&b->signaled_requests);
 
        init_irq_work(&b->irq_work, signal_irq_work);
 }
@@ -274,6 +280,32 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
        spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+                                            struct intel_context *ce)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&b->irq_lock, flags);
+       if (!list_empty(&ce->signals)) {
+               struct i915_request *rq, *next;
+
+               /* Queue for executing the signal callbacks in the irq_work */
+               list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+                       GEM_BUG_ON(rq->engine != engine);
+                       GEM_BUG_ON(!__request_completed(rq));
+
+                       __signal_request(rq, &b->signaled_requests);
+               }
+
+               INIT_LIST_HEAD(&ce->signals);
+               list_del_init(&ce->signal_link);
+
+               irq_work_queue(&b->irq_work);
+       }
+       spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 }
index aea992e46c423cc4a713069203bfbe304049a70e..74ddb49b29415503b992b0696b4e59a57fa4373b 100644 (file)
@@ -114,6 +114,11 @@ int __intel_context_do_pin(struct intel_context *ce)
                goto out_release;
        }
 
+       if (unlikely(intel_context_is_closed(ce))) {
+               err = -ENOENT;
+               goto out_unlock;
+       }
+
        if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
                err = intel_context_active_acquire(ce);
                if (unlikely(err))
index 57a30956c922b5fd17a46e9d0d4f81713cd6070a..487299cb91f2e4a41e39a388726a39d875976ce7 100644 (file)
@@ -25,8 +25,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
                return PTR_ERR(cs);
 
        offset = i915_ggtt_offset(ce->state) +
-                LRC_STATE_PN * PAGE_SIZE +
-                CTX_R_PWR_CLK_STATE * 4;
+                LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;
 
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = lower_32_bits(offset);
index ca0d4f4f3615b6d7ab78392bc3e002818938d63f..4954b0df4864616497bbcce8b9a84d6e0e620446 100644 (file)
@@ -102,6 +102,8 @@ struct intel_context {
 
        /** sseu: Control eu/slice partitioning */
        struct intel_sseu sseu;
+
+       u8 wa_bb_page; /* if set, page num reserved for context workarounds */
 };
 
 #endif /* __INTEL_CONTEXT_TYPES__ */
index a1aa0d3e8be1d5c51badcc555dbe94c92aa78953..9bf6d49899688a36320a32b96b831f3ff693349f 100644 (file)
@@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 int intel_engines_init_mmio(struct intel_gt *gt);
 int intel_engines_init(struct intel_gt *gt);
 
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
 void intel_engines_release(struct intel_gt *gt);
 void intel_engines_free(struct intel_gt *gt);
 
@@ -236,22 +238,35 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+                                            struct intel_context *ce);
+
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
                                    struct drm_printer *p);
 
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
 {
        memset(batch, 0, 6 * sizeof(u32));
 
-       batch[0] = GFX_OP_PIPE_CONTROL(6);
-       batch[1] = flags;
+       batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+       batch[1] = flags1;
        batch[2] = offset;
 
        return batch + 6;
 }
 
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+       return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+       return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
 static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
 {
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -260,8 +275,8 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       *cs++ = GFX_OP_PIPE_CONTROL(6);
-       *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
+       *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+       *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;
@@ -271,6 +286,18 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
        return cs;
 }
 
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+       return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+       return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+}
+
 static inline u32 *
 gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
@@ -308,9 +335,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...);
 
-int intel_enable_engine_stats(struct intel_engine_cs *engine);
-void intel_disable_engine_stats(struct intel_engine_cs *engine);
-
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 
 struct i915_request *
index 883a9b7fe88d5a6a0b4398d3a3793b727b015343..da5b61085257e3005db4222bbcf9a62baa0308fb 100644 (file)
@@ -31,7 +31,6 @@
 #include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 #include "intel_engine_user.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
@@ -327,6 +326,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
                engine->props.preempt_timeout_ms = 0;
 
+       engine->defaults = engine->props; /* never to change again */
+
        engine->context_size = intel_engine_context_size(gt, engine->class);
        if (WARN_ON(engine->context_size > BIT(20)))
                engine->context_size = 0;
@@ -347,8 +348,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        gt->engine_class[info->class][info->instance] = engine;
        gt->engine[id] = engine;
 
-       i915->engine[id] = engine;
-
        return 0;
 }
 
@@ -425,17 +424,27 @@ void intel_engines_release(struct intel_gt *gt)
                engine->release = NULL;
 
                memset(&engine->reset, 0, sizeof(engine->reset));
-
-               gt->i915->engine[id] = NULL;
        }
 }
 
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+       if (!engine->request_pool)
+               return;
+
+       kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
 void intel_engines_free(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
+       /* Free the requests! dma-resv keeps fences around for an eternity */
+       rcu_barrier();
+
        for_each_engine(engine, gt, id) {
+               intel_engine_free_request_pool(engine);
                kfree(engine);
                gt->engine[id] = NULL;
        }
@@ -623,8 +632,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init__pm(engine);
        intel_engine_init_retire(engine);
 
-       intel_engine_pool_init(&engine->pool);
-
        /* Use the whole device by default */
        engine->sseu =
                intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -821,12 +828,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        cleanup_status_page(engine);
 
        intel_engine_fini_retire(engine);
-       intel_engine_pool_fini(&engine->pool);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
 
        if (engine->default_state)
-               i915_gem_object_put(engine->default_state);
+               fput(engine->default_state);
 
        if (engine->kernel_context) {
                intel_context_unpin(engine->kernel_context);
@@ -1225,6 +1231,49 @@ static void print_request(struct drm_printer *m,
                   name);
 }
 
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+       struct intel_timeline *tl;
+
+       /*
+        * Even though we are holding the engine->active.lock here, there
+        * is no control over the submission queue per-se and we are
+        * inspecting the active state at a random point in time, with an
+        * unknown queue. Play safe and make sure the timeline remains valid.
+        * (Only being used for pretty printing, one extra kref shouldn't
+        * cause a camel stampede!)
+        */
+       rcu_read_lock();
+       tl = rcu_dereference(rq->timeline);
+       if (!kref_get_unless_zero(&tl->kref))
+               tl = NULL;
+       rcu_read_unlock();
+
+       return tl;
+}
+
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+       int len = 0;
+
+       if (!i915_request_signaled(rq)) {
+               struct intel_timeline *tl = get_timeline(rq);
+
+               len = scnprintf(buf, sz,
+                               "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+                               i915_ggtt_offset(rq->ring->vma),
+                               tl ? tl->hwsp_offset : 0,
+                               hwsp_seqno(rq),
+                               DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+                                                     1000 * 1000));
+
+               if (tl)
+                       intel_timeline_put(tl);
+       }
+
+       return len;
+}
+
 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
 {
        const size_t rowsize = 8 * sizeof(u32);
@@ -1254,27 +1303,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
        }
 }
 
-static struct intel_timeline *get_timeline(struct i915_request *rq)
-{
-       struct intel_timeline *tl;
-
-       /*
-        * Even though we are holding the engine->active.lock here, there
-        * is no control over the submission queue per-se and we are
-        * inspecting the active state at a random point in time, with an
-        * unknown queue. Play safe and make sure the timeline remains valid.
-        * (Only being used for pretty printing, one extra kref shouldn't
-        * cause a camel stampede!)
-        */
-       rcu_read_lock();
-       tl = rcu_dereference(rq->timeline);
-       if (!kref_get_unless_zero(&tl->kref))
-               tl = NULL;
-       rcu_read_unlock();
-
-       return tl;
-}
-
 static const char *repr_timer(const struct timer_list *t)
 {
        if (!READ_ONCE(t->expires))
@@ -1393,39 +1421,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                        int len;
 
                        len = scnprintf(hdr, sizeof(hdr),
-                                       "\t\tActive[%d]: ",
-                                       (int)(port - execlists->active));
-                       if (!i915_request_signaled(rq)) {
-                               struct intel_timeline *tl = get_timeline(rq);
-
-                               len += scnprintf(hdr + len, sizeof(hdr) - len,
-                                                "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
-                                                i915_ggtt_offset(rq->ring->vma),
-                                                tl ? tl->hwsp_offset : 0,
-                                                hwsp_seqno(rq),
-                                                DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
-                                                                      1000 * 1000));
-
-                               if (tl)
-                                       intel_timeline_put(tl);
-                       }
+                                       "\t\tActive[%d]:  ccid:%08x, ",
+                                       (int)(port - execlists->active),
+                                       rq->context->lrc.ccid);
+                       len += print_ring(hdr + len, sizeof(hdr) - len, rq);
                        scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
                        print_request(m, rq, hdr);
                }
                for (port = execlists->pending; (rq = *port); port++) {
-                       struct intel_timeline *tl = get_timeline(rq);
-                       char hdr[80];
-
-                       snprintf(hdr, sizeof(hdr),
-                                "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
-                                (int)(port - execlists->pending),
-                                i915_ggtt_offset(rq->ring->vma),
-                                tl ? tl->hwsp_offset : 0,
-                                hwsp_seqno(rq));
-                       print_request(m, rq, hdr);
+                       char hdr[160];
+                       int len;
 
-                       if (tl)
-                               intel_timeline_put(tl);
+                       len = scnprintf(hdr, sizeof(hdr),
+                                       "\t\tPending[%d]: ccid:%08x, ",
+                                       (int)(port - execlists->pending),
+                                       rq->context->lrc.ccid);
+                       len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+                       scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+                       print_request(m, rq, hdr);
                }
                rcu_read_unlock();
                execlists_active_unlock_bh(execlists);
@@ -1574,58 +1587,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        intel_engine_print_breadcrumbs(engine, m);
 }
 
-/**
- * intel_enable_engine_stats() - Enable engine busy tracking on engine
- * @engine: engine to enable stats collection
- *
- * Start collecting the engine busyness data for @engine.
- *
- * Returns 0 on success or a negative error code.
- */
-int intel_enable_engine_stats(struct intel_engine_cs *engine)
-{
-       struct intel_engine_execlists *execlists = &engine->execlists;
-       unsigned long flags;
-       int err = 0;
-
-       if (!intel_engine_supports_stats(engine))
-               return -ENODEV;
-
-       execlists_active_lock_bh(execlists);
-       write_seqlock_irqsave(&engine->stats.lock, flags);
-
-       if (unlikely(engine->stats.enabled == ~0)) {
-               err = -EBUSY;
-               goto unlock;
-       }
-
-       if (engine->stats.enabled++ == 0) {
-               struct i915_request * const *port;
-               struct i915_request *rq;
-
-               engine->stats.enabled_at = ktime_get();
-
-               /* XXX submission method oblivious? */
-               for (port = execlists->active; (rq = *port); port++)
-                       engine->stats.active++;
-
-               for (port = execlists->pending; (rq = *port); port++) {
-                       /* Exclude any contexts already counted in active */
-                       if (!intel_context_inflight_count(rq->context))
-                               engine->stats.active++;
-               }
-
-               if (engine->stats.active)
-                       engine->stats.start = engine->stats.enabled_at;
-       }
-
-unlock:
-       write_sequnlock_irqrestore(&engine->stats.lock, flags);
-       execlists_active_unlock_bh(execlists);
-
-       return err;
-}
-
 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
 {
        ktime_t total = engine->stats.total;
@@ -1634,7 +1595,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
         * If the engine is executing something at the moment
         * add it to the total.
         */
-       if (engine->stats.active)
+       if (atomic_read(&engine->stats.active))
                total = ktime_add(total,
                                  ktime_sub(ktime_get(), engine->stats.start));
 
@@ -1660,28 +1621,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
        return total;
 }
 
-/**
- * intel_disable_engine_stats() - Disable engine busy tracking on engine
- * @engine: engine to disable stats collection
- *
- * Stops collecting the engine busyness data for @engine.
- */
-void intel_disable_engine_stats(struct intel_engine_cs *engine)
-{
-       unsigned long flags;
-
-       if (!intel_engine_supports_stats(engine))
-               return;
-
-       write_seqlock_irqsave(&engine->stats.lock, flags);
-       WARN_ON_ONCE(engine->stats.enabled == 0);
-       if (--engine->stats.enabled == 0) {
-               engine->stats.total = __intel_engine_get_busy_time(engine);
-               engine->stats.active = 0;
-       }
-       write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
 static bool match_ring(struct i915_request *rq)
 {
        u32 ring = ENGINE_READ(rq->engine, RING_START);
index dd825718e4e5504b5d62e4a66f3b16ebc0685387..5136c8bf112d14b87ccece929002a9838e19e5ec 100644 (file)
@@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
        delay = msecs_to_jiffies_timeout(delay);
        if (delay >= HZ)
                delay = round_jiffies_up_relative(delay);
-       schedule_delayed_work(&engine->heartbeat.work, delay);
+       mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
 
        return true;
 }
index b6cf284e3a2d5b970ee7c144a78ad5bef53a03d7..d0a1078ef63249909960efb336f951b2c911a6df 100644 (file)
 #include "intel_engine.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_rc6.h"
 #include "intel_ring.h"
+#include "shmem_utils.h"
 
 static int __engine_unpark(struct intel_wakeref *wf)
 {
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        struct intel_context *ce;
-       void *map;
 
        ENGINE_TRACE(engine, "\n");
 
        intel_gt_pm_get(engine->gt);
 
-       /* Pin the default state for fast resets from atomic context. */
-       map = NULL;
-       if (engine->default_state)
-               map = i915_gem_object_pin_map(engine->default_state,
-                                             I915_MAP_WB);
-       if (!IS_ERR_OR_NULL(map))
-               engine->pinned_default_state = map;
-
        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
        if (ce) {
@@ -44,6 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
                        struct drm_i915_gem_object *obj = ce->state->obj;
                        int type = i915_coherent_map_type(engine->i915);
+                       void *map;
 
                        map = i915_gem_object_pin_map(obj, type);
                        if (!IS_ERR(map)) {
@@ -181,7 +173,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
         * Ergo, if we put ourselves on the timelines.active_list
         * (se intel_timeline_enter()) before we increment the
         * engine->wakeref.count, we may see the request completion and retire
-        * it causing an undeflow of the engine->wakeref.
+        * it causing an underflow of the engine->wakeref.
         */
        flags = __timeline_mark_lock(ce);
        GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
@@ -255,7 +247,6 @@ static int __engine_park(struct intel_wakeref *wf)
 
        intel_engine_park_heartbeat(engine);
        intel_engine_disarm_breadcrumbs(engine);
-       intel_engine_pool_park(&engine->pool);
 
        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -263,11 +254,6 @@ static int __engine_park(struct intel_wakeref *wf)
        if (engine->park)
                engine->park(engine);
 
-       if (engine->pinned_default_state) {
-               i915_gem_object_unpin_map(engine->default_state);
-               engine->pinned_default_state = NULL;
-       }
-
        engine->execlists.no_priolist = false;
 
        /* While gt calls i915_vma_parked(), we have to break the lock cycle */
index e52c2b0cb24518a28dd7c22cad8ad49b6e417d59..418df0a1314564bbe081a33f3f973c44150027cb 100644 (file)
@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
        intel_wakeref_put_async(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+                                            unsigned long delay)
+{
+       intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 {
        intel_wakeref_unlock_wait(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
deleted file mode 100644 (file)
index 1bd89ca..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_H
-#define INTEL_ENGINE_POOL_H
-
-#include "intel_engine_pool_types.h"
-#include "i915_active.h"
-#include "i915_request.h"
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
-
-static inline int
-intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
-                             struct i915_request *rq)
-{
-       return i915_active_add_request(&node->active, rq);
-}
-
-static inline void
-intel_engine_pool_put(struct intel_engine_pool_node *node)
-{
-       i915_active_release(&node->active);
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool);
-void intel_engine_pool_park(struct intel_engine_pool *pool);
-void intel_engine_pool_fini(struct intel_engine_pool *pool);
-
-#endif /* INTEL_ENGINE_POOL_H */
index 0be674ae1cf63ff4a2adf665696dca8f8b9b74a1..2b6cdf47d42801c0c8b6d915bac66b078722b8a5 100644 (file)
@@ -22,7 +22,6 @@
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "intel_engine_pool_types.h"
 #include "intel_sseu.h"
 #include "intel_timeline_types.h"
 #include "intel_wakeref.h"
@@ -180,6 +179,11 @@ struct intel_engine_execlists {
         */
        u32 error_interrupt;
 
+       /**
+        * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
+        */
+       u32 reset_ccid;
+
        /**
         * @no_priolist: priority lists disabled
         */
@@ -321,6 +325,9 @@ struct intel_engine_cs {
                struct list_head hold; /* ready requests, but on hold */
        } active;
 
+       /* keep a request in reserve for a [pm] barrier under oom */
+       struct i915_request *request_pool;
+
        struct llist_head barrier_tasks;
 
        struct intel_context *kernel_context; /* pinned */
@@ -336,8 +343,7 @@ struct intel_engine_cs {
 
        unsigned long wakeref_serial;
        struct intel_wakeref wakeref;
-       struct drm_i915_gem_object *default_state;
-       void *pinned_default_state;
+       struct file *default_state;
 
        struct {
                struct intel_ring *ring;
@@ -371,6 +377,8 @@ struct intel_engine_cs {
                spinlock_t irq_lock;
                struct list_head signalers;
 
+               struct list_head signaled_requests;
+
                struct irq_work irq_work; /* for use from inside irq_lock */
 
                unsigned int irq_enabled;
@@ -402,13 +410,6 @@ struct intel_engine_cs {
                struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
        } pmu;
 
-       /*
-        * A pool of objects to use as shadow copies of client batch buffers
-        * when the command parser is enabled. Prevents the client from
-        * modifying the batch contents after software parsing.
-        */
-       struct intel_engine_pool pool;
-
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
        struct i915_wa_list ctx_wa_list;
@@ -420,6 +421,7 @@ struct intel_engine_cs {
        void            (*irq_enable)(struct intel_engine_cs *engine);
        void            (*irq_disable)(struct intel_engine_cs *engine);
 
+       void            (*sanitize)(struct intel_engine_cs *engine);
        int             (*resume)(struct intel_engine_cs *engine);
 
        struct {
@@ -529,34 +531,34 @@ struct intel_engine_cs {
 
        struct {
                /**
-                * @lock: Lock protecting the below fields.
-                */
-               seqlock_t lock;
-               /**
-                * @enabled: Reference count indicating number of listeners.
+                * @active: Number of contexts currently scheduled in.
                 */
-               unsigned int enabled;
+               atomic_t active;
+
                /**
-                * @active: Number of contexts currently scheduled in.
+                * @lock: Lock protecting the below fields.
                 */
-               unsigned int active;
+               seqlock_t lock;
+
                /**
-                * @enabled_at: Timestamp when busy stats were enabled.
+                * @total: Total time this engine was busy.
+                *
+                * Accumulated time not counting the most recent block in cases
+                * where engine is currently busy (active > 0).
                 */
-               ktime_t enabled_at;
+               ktime_t total;
+
                /**
                 * @start: Timestamp of the last idle to active transition.
                 *
                 * Idle is defined as active == 0, active is active > 0.
                 */
                ktime_t start;
+
                /**
-                * @total: Total time this engine was busy.
-                *
-                * Accumulated time not counting the most recent block in cases
-                * where engine is currently busy (active > 0).
+                * @rps: Utilisation at last RPS sampling.
                 */
-               ktime_t total;
+               ktime_t rps;
        } stats;
 
        struct {
@@ -565,7 +567,7 @@ struct intel_engine_cs {
                unsigned long preempt_timeout_ms;
                unsigned long stop_timeout_ms;
                unsigned long timeslice_duration_ms;
-       } props;
+       } props, defaults;
 };
 
 static inline bool
index 4c5a209cb66954adb398e15d3b12ce2cb3f12fb8..66165b10256e542b3a30ab55f7997da219008c14 100644 (file)
@@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
                                              ggtt->mappable_end);
        }
 
-       i915_ggtt_init_fences(ggtt);
+       intel_ggtt_init_fences(ggtt);
 
        return 0;
 }
@@ -715,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
  */
 void i915_ggtt_driver_release(struct drm_i915_private *i915)
 {
+       struct i915_ggtt *ggtt = &i915->ggtt;
        struct pagevec *pvec;
 
-       fini_aliasing_ppgtt(&i915->ggtt);
+       fini_aliasing_ppgtt(ggtt);
 
-       ggtt_cleanup_hw(&i915->ggtt);
+       intel_ggtt_fini_fences(ggtt);
+       ggtt_cleanup_hw(ggtt);
 
        pvec = &i915->mm.wc_stash.pvec;
        if (pvec->nr) {
@@ -784,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
        else
                ggtt->gsm = ioremap_wc(phys_addr, size);
        if (!ggtt->gsm) {
-               DRM_ERROR("Failed to map the ggtt page table\n");
+               drm_err(&i915->drm, "Failed to map the ggtt page table\n");
                return -ENOMEM;
        }
 
        ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
        if (ret) {
-               DRM_ERROR("Scratch setup failed\n");
+               drm_err(&i915->drm, "Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(ggtt->gsm);
                return ret;
@@ -838,7 +840,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        struct pci_dev *pdev = i915->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
-       int err;
 
        /* TODO: We're not aware of mappable constraints on gen8 yet */
        if (!IS_DGFX(i915)) {
@@ -846,12 +847,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
                ggtt->mappable_end = resource_size(&ggtt->gmadr);
        }
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
-       if (err)
-               DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        if (IS_CHERRYVIEW(i915))
                size = chv_get_total_gtt_size(snb_gmch_ctl);
@@ -987,7 +982,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        struct pci_dev *pdev = i915->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
-       int err;
 
        ggtt->gmadr = pci_resource(pdev, 2);
        ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -997,15 +991,11 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
         * just a coarse sanity check.
         */
        if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
-               DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+               drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+                       &ggtt->mappable_end);
                return -ENXIO;
        }
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
-       if (err)
-               DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
        size = gen6_get_total_gtt_size(snb_gmch_ctl);
@@ -1052,7 +1042,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 
        ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
        if (!ret) {
-               DRM_ERROR("failed to set up gmch\n");
+               drm_err(&i915->drm, "failed to set up gmch\n");
                return -EIO;
        }
 
@@ -1075,7 +1065,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->vm.vma_ops.clear_pages = clear_pages;
 
        if (unlikely(ggtt->do_idle_maps))
-               dev_notice(i915->drm.dev,
+               drm_notice(&i915->drm,
                           "Applying Ironlake quirks for intel_iommu\n");
 
        return 0;
@@ -1100,26 +1090,29 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
                return ret;
 
        if ((ggtt->vm.total - 1) >> 32) {
-               DRM_ERROR("We never expected a Global GTT with more than 32bits"
-                         " of address space! Found %lldM!\n",
-                         ggtt->vm.total >> 20);
+               drm_err(&i915->drm,
+                       "We never expected a Global GTT with more than 32bits"
+                       " of address space! Found %lldM!\n",
+                       ggtt->vm.total >> 20);
                ggtt->vm.total = 1ULL << 32;
                ggtt->mappable_end =
                        min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }
 
        if (ggtt->mappable_end > ggtt->vm.total) {
-               DRM_ERROR("mappable aperture extends past end of GGTT,"
-                         " aperture=%pa, total=%llx\n",
-                         &ggtt->mappable_end, ggtt->vm.total);
+               drm_err(&i915->drm,
+                       "mappable aperture extends past end of GGTT,"
+                       " aperture=%pa, total=%llx\n",
+                       &ggtt->mappable_end, ggtt->vm.total);
                ggtt->mappable_end = ggtt->vm.total;
        }
 
        /* GMADR is the PCI mmio aperture into the global GTT. */
-       DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
-       DRM_DEBUG_DRIVER("DSM size = %lluM\n",
-                        (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+       drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+       drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+               (u64)ggtt->mappable_end >> 20);
+       drm_dbg(&i915->drm, "DSM size = %lluM\n",
+               (u64)resource_size(&intel_graphics_stolen_res) >> 20);
 
        return 0;
 }
@@ -1137,7 +1130,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
                return ret;
 
        if (intel_vtd_active())
-               dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+               drm_info(&i915->drm, "VT-d active for gfx access\n");
 
        return 0;
 }
@@ -1212,6 +1205,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
 
        if (INTEL_GEN(ggtt->vm.i915) >= 8)
                setup_private_pat(ggtt->vm.gt->uncore);
+
+       intel_ggtt_restore_fences(ggtt);
 }
 
 static struct scatterlist *
similarity index 88%
rename from drivers/gpu/drm/i915/i915_gem_fence_reg.c
rename to drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index d152b648c73c76a361d35b5eba8032854d23ee98..7fb36b12fe7a2aabf8e64bfe6ad7d1cb3df4503e 100644 (file)
@@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
        return fence->ggtt->vm.gt->uncore;
 }
 
-static void i965_write_fence_reg(struct i915_fence_reg *fence,
-                                struct i915_vma *vma)
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
 {
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
@@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
        }
 
        val = 0;
-       if (vma) {
-               unsigned int stride = i915_gem_object_get_stride(vma->obj);
+       if (fence->tiling) {
+               unsigned int stride = fence->stride;
 
-               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
-               GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
                GEM_BUG_ON(!IS_ALIGNED(stride, 128));
 
-               val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
-               val |= vma->node.start;
+               val = fence->start + fence->size - I965_FENCE_PAGE;
+               val <<= 32;
+               val |= fence->start;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
-               if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+               if (fence->tiling == I915_TILING_Y)
                        val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                val |= I965_FENCE_REG_VALID;
        }
@@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
        }
 }
 
-static void i915_write_fence_reg(struct i915_fence_reg *fence,
-                                struct i915_vma *vma)
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
 {
        u32 val;
 
        val = 0;
-       if (vma) {
-               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+       if (fence->tiling) {
+               unsigned int stride = fence->stride;
+               unsigned int tiling = fence->tiling;
                bool is_y_tiled = tiling == I915_TILING_Y;
-               unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
-               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-               GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
-               GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
                        stride /= 128;
@@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
                        stride /= 512;
                GEM_BUG_ON(!is_power_of_2(stride));
 
-               val = vma->node.start;
+               val = fence->start;
                if (is_y_tiled)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-               val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+               val |= I915_FENCE_SIZE_BITS(fence->size);
                val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
 
                val |= I830_FENCE_REG_VALID;
@@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
        }
 }
 
-static void i830_write_fence_reg(struct i915_fence_reg *fence,
-                                struct i915_vma *vma)
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
 {
        u32 val;
 
        val = 0;
-       if (vma) {
-               unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
-               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-               GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
-               GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-               GEM_BUG_ON(!is_power_of_2(stride / 128));
-               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
+       if (fence->tiling) {
+               unsigned int stride = fence->stride;
 
-               val = vma->node.start;
-               if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+               val = fence->start;
+               if (fence->tiling == I915_TILING_Y)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-               val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+               val |= I830_FENCE_SIZE_BITS(fence->size);
                val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
        }
@@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
        }
 }
 
-static void fence_write(struct i915_fence_reg *fence,
-                       struct i915_vma *vma)
+static void fence_write(struct i915_fence_reg *fence)
 {
        struct drm_i915_private *i915 = fence_to_i915(fence);
 
@@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence,
         */
 
        if (IS_GEN(i915, 2))
-               i830_write_fence_reg(fence, vma);
+               i830_write_fence_reg(fence);
        else if (IS_GEN(i915, 3))
-               i915_write_fence_reg(fence, vma);
+               i915_write_fence_reg(fence);
        else
-               i965_write_fence_reg(fence, vma);
+               i965_write_fence_reg(fence);
 
        /*
         * Access through the fenced region afterwards is
         * ordered by the posting reads whilst writing the registers.
         */
+}
 
-       fence->dirty = false;
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+       return INTEL_GEN(fence_to_i915(fence)) < 4;
 }
 
 static int fence_update(struct i915_fence_reg *fence,
@@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence,
        struct i915_vma *old;
        int ret;
 
+       fence->tiling = 0;
        if (vma) {
+               GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
+                          !i915_gem_object_get_tiling(vma->obj));
+
                if (!i915_vma_is_map_and_fenceable(vma))
                        return -EINVAL;
 
-               if (drm_WARN(&uncore->i915->drm,
-                            !i915_gem_object_get_stride(vma->obj) ||
-                            !i915_gem_object_get_tiling(vma->obj),
-                            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-                            i915_gem_object_get_stride(vma->obj),
-                            i915_gem_object_get_tiling(vma->obj)))
-                       return -EINVAL;
+               if (gpu_uses_fence_registers(fence)) {
+                       /* implicit 'unfenced' GPU blits */
+                       ret = i915_vma_sync(vma);
+                       if (ret)
+                               return ret;
+               }
 
-               ret = i915_vma_sync(vma);
-               if (ret)
-                       return ret;
+               fence->start = vma->node.start;
+               fence->size = vma->fence_size;
+               fence->stride = i915_gem_object_get_stride(vma->obj);
+               fence->tiling = i915_gem_object_get_tiling(vma->obj);
        }
+       WRITE_ONCE(fence->dirty, false);
 
        old = xchg(&fence->vma, NULL);
        if (old) {
                /* XXX Ideally we would move the waiting to outside the mutex */
-               ret = i915_vma_sync(old);
+               ret = i915_active_wait(&fence->active);
                if (ret) {
                        fence->vma = old;
                        return ret;
@@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence,
        /*
         * We only need to update the register itself if the device is awake.
         * If the device is currently powered down, we will defer the write
-        * to the runtime resume, see i915_gem_restore_fences().
+        * to the runtime resume, see intel_ggtt_restore_fences().
         *
         * This only works for removing the fence register, on acquisition
         * the caller must hold the rpm wakeref. The fence register must
@@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence,
        }
 
        WRITE_ONCE(fence->vma, vma);
-       fence_write(fence, vma);
+       fence_write(fence);
 
        if (vma) {
                vma->fence = fence;
@@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
  *
  * This function force-removes any fence from the given object, which is useful
  * if the kernel wants to do untiled GTT access.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
  */
-int i915_vma_revoke_fence(struct i915_vma *vma)
+void i915_vma_revoke_fence(struct i915_vma *vma)
 {
        struct i915_fence_reg *fence = vma->fence;
+       intel_wakeref_t wakeref;
 
        lockdep_assert_held(&vma->vm->mutex);
        if (!fence)
-               return 0;
+               return;
 
-       if (atomic_read(&fence->pin_count))
-               return -EBUSY;
+       GEM_BUG_ON(fence->vma != vma);
+       GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+       GEM_BUG_ON(atomic_read(&fence->pin_count));
 
-       return fence_update(fence, NULL);
+       fence->tiling = 0;
+       WRITE_ONCE(fence->vma, NULL);
+       vma->fence = NULL;
+
+       with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+               fence_write(fence);
 }
 
 static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
 }
 
 /**
- * i915_gem_restore_fences - restore fence state
+ * intel_ggtt_restore_fences - restore fence state
  * @ggtt: Global GTT
  *
  * Restore the hw fence state to match the software tracking again, to be called
  * after a gpu reset and on resume. Note that on runtime suspend we only cancel
  * the fences, to be reacquired by the user later.
  */
-void i915_gem_restore_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
 {
        int i;
 
-       rcu_read_lock(); /* keep obj alive as we dereference */
-       for (i = 0; i < ggtt->num_fences; i++) {
-               struct i915_fence_reg *reg = &ggtt->fence_regs[i];
-               struct i915_vma *vma = READ_ONCE(reg->vma);
-
-               GEM_BUG_ON(vma && vma->fence != reg);
-
-               /*
-                * Commit delayed tiling changes if we have an object still
-                * attached to the fence, otherwise just clear the fence.
-                */
-               if (vma && !i915_gem_object_is_tiled(vma->obj))
-                       vma = NULL;
-
-               fence_write(reg, vma);
-       }
-       rcu_read_unlock();
+       for (i = 0; i < ggtt->num_fences; i++)
+               fence_write(&ggtt->fence_regs[i]);
 }
 
 /**
@@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
  * bit 17 of its physical address and therefore being interpreted differently
  * by the GPU.
  */
-static void i915_gem_swizzle_page(struct page *page)
+static void swizzle_page(struct page *page)
 {
        char temp[64];
        char *vaddr;
@@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
        for_each_sgt_page(page, sgt_iter, pages) {
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
-                       i915_gem_swizzle_page(page);
+                       swizzle_page(page);
                        set_page_dirty(page);
                }
                i++;
@@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
        }
 }
 
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 {
        struct drm_i915_private *i915 = ggtt->vm.i915;
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
        if (intel_vgpu_active(i915))
                num_fences = intel_uncore_read(uncore,
                                               vgtif_reg(avail_rs.fence_num));
+       ggtt->fence_regs = kcalloc(num_fences,
+                                  sizeof(*ggtt->fence_regs),
+                                  GFP_KERNEL);
+       if (!ggtt->fence_regs)
+               num_fences = 0;
 
        /* Initialize fence registers to zero */
        for (i = 0; i < num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];
 
+               i915_active_init(&fence->active, NULL, NULL);
                fence->ggtt = ggtt;
                fence->id = i;
                list_add_tail(&fence->link, &ggtt->fence_list);
        }
        ggtt->num_fences = num_fences;
 
-       i915_gem_restore_fences(ggtt);
+       intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+       int i;
+
+       for (i = 0; i < ggtt->num_fences; i++) {
+               struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+               i915_active_fini(&fence->active);
+       }
+
+       kfree(ggtt->fence_regs);
 }
 
 void intel_gt_init_swizzling(struct intel_gt *gt)
similarity index 86%
rename from drivers/gpu/drm/i915/i915_gem_fence_reg.h
rename to drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 7bd521cd7cd7b21001a3fb3af0c6f4ed8bcd5234..9eef679e1311414772acfe775dc9a366ad65c1e7 100644 (file)
  *
  */
 
-#ifndef __I915_FENCE_REG_H__
-#define __I915_FENCE_REG_H__
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__
 
 #include <linux/list.h>
 #include <linux/types.h>
 
+#include "i915_active.h"
+
 struct drm_i915_gem_object;
 struct i915_ggtt;
 struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
        struct i915_ggtt *ggtt;
        struct i915_vma *vma;
        atomic_t pin_count;
+       struct i915_active active;
        int id;
        /**
         * Whether the tiling parameters for the currently
@@ -51,20 +54,24 @@ struct i915_fence_reg {
         * command (such as BLT on gen2/3), as a "fence".
         */
        bool dirty;
+       u32 start;
+       u32 size;
+       u32 tiling;
+       u32 stride;
 };
 
-/* i915_gem_fence_reg.c */
 struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
 void i915_unreserve_fence(struct i915_fence_reg *fence);
 
-void i915_gem_restore_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);
 
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                       struct sg_table *pages);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                         struct sg_table *pages);
 
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
 
 void intel_gt_init_swizzling(struct intel_gt *gt);
 
index f04214a54f7580b69ac581773b789c304a228a02..534e435f20bcc719d8701cfdf64a7789d3839ae9 100644 (file)
  */
 #define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*(x)-1)
 /* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
-#define   MI_LRI_CS_MMIO               (1<<19)
+#define   MI_LRI_LRM_CS_MMIO           REG_BIT(19)
 #define   MI_LRI_FORCE_POSTED          (1<<12)
 #define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
 #define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
 #define MI_LOAD_REGISTER_MEM      MI_INSTR(0x29, 1)
 #define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
 #define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 1)
+#define   MI_LRR_SOURCE_CS_MMIO                REG_BIT(18)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE          (1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
 #define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH       (1<<12) /* gen6+ */
 #define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE    (1<<11) /* MBZ on ILK */
 #define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE                (1<<10) /* GM45+ only */
-#define   PIPE_CONTROL_L3_RO_CACHE_INVALIDATE          REG_BIT(10) /* gen12 */
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE          (1<<9)
-#define   PIPE_CONTROL_HDC_PIPELINE_FLUSH              REG_BIT(9)  /* gen12 */
+#define   PIPE_CONTROL0_HDC_PIPELINE_FLUSH             REG_BIT(9)  /* gen12 */
 #define   PIPE_CONTROL_NOTIFY                          (1<<8)
 #define   PIPE_CONTROL_FLUSH_ENABLE                    (1<<7) /* gen7+ */
 #define   PIPE_CONTROL_DC_FLUSH_ENABLE                 (1<<5)
index d09f7596cb98b4fbb3e3d94edf54fb2740b430e4..f069551e412f3ec9150187e69d62dc3d2d58c4fe 100644 (file)
@@ -7,6 +7,8 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
+#include "intel_gt_clock_utils.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_mocs.h"
@@ -15,6 +17,7 @@
 #include "intel_rps.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
+#include "shmem_utils.h"
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 {
@@ -26,6 +29,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);
 
+       intel_gt_init_buffer_pool(gt);
        intel_gt_init_reset(gt);
        intel_gt_init_requests(gt);
        intel_gt_init_timelines(gt);
@@ -370,18 +374,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt)
                return i915_vm_get(&gt->ggtt->vm);
 }
 
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
-       struct intel_timeline *tl;
-
-       tl = intel_context_timeline_lock(ce);
-       if (IS_ERR(tl))
-               return PTR_ERR(tl);
-
-       intel_context_timeline_unlock(tl);
-       return 0;
-}
-
 static int __engines_record_defaults(struct intel_gt *gt)
 {
        struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -447,8 +439,7 @@ err_rq:
 
        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct i915_request *rq;
-               struct i915_vma *state;
-               void *vaddr;
+               struct file *state;
 
                rq = requests[id];
                if (!rq)
@@ -460,48 +451,16 @@ err_rq:
                }
 
                GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
-               state = rq->context->state;
-               if (!state)
+               if (!rq->context->state)
                        continue;
 
-               /* Serialise with retirement on another CPU */
-               GEM_BUG_ON(!i915_request_completed(rq));
-               err = __intel_context_flush_retire(rq->context);
-               if (err)
-                       goto out;
-
-               /* We want to be able to unbind the state from the GGTT */
-               GEM_BUG_ON(intel_context_is_pinned(rq->context));
-
-               /*
-                * As we will hold a reference to the logical state, it will
-                * not be torn down with the context, and importantly the
-                * object will hold onto its vma (making it possible for a
-                * stray GTT write to corrupt our defaults). Unmap the vma
-                * from the GTT to prevent such accidents and reclaim the
-                * space.
-                */
-               err = i915_vma_unbind(state);
-               if (err)
-                       goto out;
-
-               i915_gem_object_lock(state->obj);
-               err = i915_gem_object_set_to_cpu_domain(state->obj, false);
-               i915_gem_object_unlock(state->obj);
-               if (err)
-                       goto out;
-
-               i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
-               /* Check we can acquire the image of the context state */
-               vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
+               /* Keep a copy of the state's backing pages; free the obj */
+               state = shmem_create_from_object(rq->context->state->obj);
+               if (IS_ERR(state)) {
+                       err = PTR_ERR(state);
                        goto out;
                }
-
-               rq->engine->default_state = i915_gem_object_get(state->obj);
-               i915_gem_object_unpin_map(state->obj);
+               rq->engine->default_state = state;
        }
 
 out:
@@ -576,6 +535,8 @@ int intel_gt_init(struct intel_gt *gt)
         */
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 
+       intel_gt_init_clock_frequency(gt);
+
        err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
        if (err)
                goto out_fw;
@@ -635,8 +596,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
 {
        __intel_gt_disable(gt);
 
-       intel_uc_fini_hw(&gt->uc);
-       intel_uc_fini(&gt->uc);
+       intel_uc_driver_remove(&gt->uc);
 
        intel_engines_release(gt);
 }
@@ -663,6 +623,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
 
        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
+       intel_gt_fini_buffer_pool(gt);
 }
 
 void intel_gt_driver_late_release(struct intel_gt *gt)
similarity index 53%
rename from drivers/gpu/drm/i915/gt/intel_engine_pool.c
rename to drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 3971868183050aee3626d26056e12b33a752d66f..1495054a43053897b7e1ed5396a657db237d5d7b 100644 (file)
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2018 Intel Corporation
  */
 
@@ -8,15 +7,15 @@
 
 #include "i915_drv.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
+#include "intel_gt_buffer_pool.h"
 
-static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
 {
-       return container_of(pool, struct intel_engine_cs, pool);
+       return container_of(pool, struct intel_gt, buffer_pool);
 }
 
 static struct list_head *
-bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
 {
        int n;
 
@@ -32,16 +31,50 @@ bucket_for_size(struct intel_engine_pool *pool, size_t sz)
        return &pool->cache_list[n];
 }
 
-static void node_free(struct intel_engine_pool_node *node)
+static void node_free(struct intel_gt_buffer_pool_node *node)
 {
        i915_gem_object_put(node->obj);
        i915_active_fini(&node->active);
        kfree(node);
 }
 
+static void pool_free_work(struct work_struct *wrk)
+{
+       struct intel_gt_buffer_pool *pool =
+               container_of(wrk, typeof(*pool), work.work);
+       struct intel_gt_buffer_pool_node *node, *next;
+       unsigned long old = jiffies - HZ;
+       bool active = false;
+       LIST_HEAD(stale);
+       int n;
+
+       /* Free buffers that have not been used in the past second */
+       spin_lock_irq(&pool->lock);
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+               struct list_head *list = &pool->cache_list[n];
+
+               /* Most recent at head; oldest at tail */
+               list_for_each_entry_safe_reverse(node, next, list, link) {
+                       if (time_before(node->age, old))
+                               break;
+
+                       list_move(&node->link, &stale);
+               }
+               active |= !list_empty(list);
+       }
+       spin_unlock_irq(&pool->lock);
+
+       list_for_each_entry_safe(node, next, &stale, link)
+               node_free(node);
+
+       if (active)
+               schedule_delayed_work(&pool->work,
+                                     round_jiffies_up_relative(HZ));
+}
+
 static int pool_active(struct i915_active *ref)
 {
-       struct intel_engine_pool_node *node =
+       struct intel_gt_buffer_pool_node *node =
                container_of(ref, typeof(*node), active);
        struct dma_resv *resv = node->obj->base.resv;
        int err;
@@ -64,29 +97,31 @@ static int pool_active(struct i915_active *ref)
 __i915_active_call
 static void pool_retire(struct i915_active *ref)
 {
-       struct intel_engine_pool_node *node =
+       struct intel_gt_buffer_pool_node *node =
                container_of(ref, typeof(*node), active);
-       struct intel_engine_pool *pool = node->pool;
+       struct intel_gt_buffer_pool *pool = node->pool;
        struct list_head *list = bucket_for_size(pool, node->obj->base.size);
        unsigned long flags;
 
-       GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
        i915_gem_object_unpin_pages(node->obj);
 
        /* Return this object to the shrinker pool */
        i915_gem_object_make_purgeable(node->obj);
 
        spin_lock_irqsave(&pool->lock, flags);
+       node->age = jiffies;
        list_add(&node->link, list);
        spin_unlock_irqrestore(&pool->lock, flags);
+
+       schedule_delayed_work(&pool->work,
+                             round_jiffies_up_relative(HZ));
 }
 
-static struct intel_engine_pool_node *
-node_create(struct intel_engine_pool *pool, size_t sz)
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz)
 {
-       struct intel_engine_cs *engine = to_engine(pool);
-       struct intel_engine_pool_node *node;
+       struct intel_gt *gt = to_gt(pool);
+       struct intel_gt_buffer_pool_node *node;
        struct drm_i915_gem_object *obj;
 
        node = kmalloc(sizeof(*node),
@@ -97,7 +132,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
        node->pool = pool;
        i915_active_init(&node->active, pool_active, pool_retire);
 
-       obj = i915_gem_object_create_internal(engine->i915, sz);
+       obj = i915_gem_object_create_internal(gt->i915, sz);
        if (IS_ERR(obj)) {
                i915_active_fini(&node->active);
                kfree(node);
@@ -110,26 +145,15 @@ node_create(struct intel_engine_pool *pool, size_t sz)
        return node;
 }
 
-static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
 {
-       if (intel_engine_is_virtual(engine))
-               engine = intel_virtual_engine_get_sibling(engine, 0);
-
-       GEM_BUG_ON(!engine);
-       return &engine->pool;
-}
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
-{
-       struct intel_engine_pool *pool = lookup_pool(engine);
-       struct intel_engine_pool_node *node;
+       struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+       struct intel_gt_buffer_pool_node *node;
        struct list_head *list;
        unsigned long flags;
        int ret;
 
-       GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
        size = PAGE_ALIGN(size);
        list = bucket_for_size(pool, size);
 
@@ -157,34 +181,48 @@ intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
        return node;
 }
 
-void intel_engine_pool_init(struct intel_engine_pool *pool)
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
 {
+       struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        int n;
 
        spin_lock_init(&pool->lock);
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                INIT_LIST_HEAD(&pool->cache_list[n]);
+       INIT_DELAYED_WORK(&pool->work, pool_free_work);
 }
 
-void intel_engine_pool_park(struct intel_engine_pool *pool)
+static void pool_free_imm(struct intel_gt_buffer_pool *pool)
 {
        int n;
 
+       spin_lock_irq(&pool->lock);
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+               struct intel_gt_buffer_pool_node *node, *next;
                struct list_head *list = &pool->cache_list[n];
-               struct intel_engine_pool_node *node, *nn;
 
-               list_for_each_entry_safe(node, nn, list, link)
+               list_for_each_entry_safe(node, next, list, link)
                        node_free(node);
-
                INIT_LIST_HEAD(list);
        }
+       spin_unlock_irq(&pool->lock);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+       struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+       if (cancel_delayed_work_sync(&pool->work))
+               pool_free_imm(pool);
 }
 
-void intel_engine_pool_fini(struct intel_engine_pool *pool)
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
 {
+       struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        int n;
 
+       intel_gt_flush_buffer_pool(gt);
+
        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
new file mode 100644 (file)
index 0000000..42cbac0
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+                                struct i915_request *rq)
+{
+       return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+       i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
similarity index 54%
rename from drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
rename to drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index e31ee361b76f7a97dc2639ec48506ea4e3b3ed34..e28bdda771ed14371e03276f97f1fc03e669b4cd 100644 (file)
@@ -4,26 +4,29 @@
  * Copyright © 2014-2018 Intel Corporation
  */
 
-#ifndef INTEL_ENGINE_POOL_TYPES_H
-#define INTEL_ENGINE_POOL_TYPES_H
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 #include "i915_active_types.h"
 
 struct drm_i915_gem_object;
 
-struct intel_engine_pool {
+struct intel_gt_buffer_pool {
        spinlock_t lock;
        struct list_head cache_list[4];
+       struct delayed_work work;
 };
 
-struct intel_engine_pool_node {
+struct intel_gt_buffer_pool_node {
        struct i915_active active;
        struct drm_i915_gem_object *obj;
        struct list_head link;
-       struct intel_engine_pool *pool;
+       struct intel_gt_buffer_pool *pool;
+       unsigned long age;
 };
 
-#endif /* INTEL_ENGINE_POOL_TYPES_H */
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
new file mode 100644 (file)
index 0000000..9990796
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+
+#define MHZ_12   12000000 /* 12MHz (24MHz/2), 83.333ns */
+#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
+#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+
+static u32 read_clock_frequency(const struct intel_gt *gt)
+{
+       if (INTEL_GEN(gt->i915) >= 11) {
+               u32 config;
+
+               config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+               config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
+               config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+               switch (config) {
+               case 0: return MHZ_12;
+               case 1:
+               case 2: return MHZ_19_2;
+               default:
+               case 3: return MHZ_12_5;
+               }
+       } else if (INTEL_GEN(gt->i915) >= 9) {
+               if (IS_GEN9_LP(gt->i915))
+                       return MHZ_19_2;
+               else
+                       return MHZ_12;
+       } else {
+               return MHZ_12_5;
+       }
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
+       /*
+        * Note that on gen11+, the clock frequency may be reconfigured.
+        * We do not, and we assume nobody else does.
+        */
+       gt->clock_frequency = read_clock_frequency(gt);
+       GT_TRACE(gt,
+                "Using clock frequency: %dkHz\n",
+                gt->clock_frequency / 1000);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt)
+{
+       if (gt->clock_frequency != read_clock_frequency(gt)) {
+               dev_err(gt->i915->drm.dev,
+                       "GT clock frequency changed, was %uHz, now %uHz!\n",
+                       gt->clock_frequency,
+                       read_clock_frequency(gt));
+       }
+}
+#endif
+
+static u64 div_u64_roundup(u64 nom, u32 den)
+{
+       return div_u64(nom + den - 1, den);
+}
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+       return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
+                              gt->clock_frequency);
+}
+
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+       return intel_gt_clock_interval_to_ns(gt, 16 * count);
+}
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+{
+       return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
+                              1000 * 1000 * 1000);
+}
+
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+{
+       u32 val;
+
+       /*
+        * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
+        * 8300) freezing up around GPU hangs. Looks as if even
+        * scheduling/timer interrupts start misbehaving if the RPS
+        * EI/thresholds are "bad", leading to a very sluggish or even
+        * frozen machine.
+        */
+       val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+       if (IS_GEN(gt->i915, 6))
+               val = roundup(val, 25);
+
+       return val;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
new file mode 100644 (file)
index 0000000..f793c89
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CLOCK_UTILS_H__
+#define __INTEL_GT_CLOCK_UTILS_H__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt);
+#else
+static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
+#endif
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+
+#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
index 8b653c0f5e5f91e364c65ef97cddc580491badd3..6bdb434a442d530edd4fb7afbe08187438cdb6e0 100644 (file)
@@ -12,6 +12,7 @@
 #include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_llc.h"
@@ -138,6 +139,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 
+       intel_gt_check_clock_frequency(gt);
+
        /*
         * As we have just resumed the machine and woken the device up from
         * deep PCI sleep (presumably D3_cold), assume the HW has been reset
@@ -155,6 +158,10 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
 
        intel_uc_reset_prepare(&gt->uc);
 
+       for_each_engine(engine, gt, id)
+               if (engine->sanitize)
+                       engine->sanitize(engine);
+
        if (reset_engines(gt) || force) {
                for_each_engine(engine, gt, id)
                        __intel_engine_reset(engine, false);
@@ -164,6 +171,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
                if (engine->reset.finish)
                        engine->reset.finish(engine);
 
+       intel_rps_sanitize(&gt->rps);
+
        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
@@ -191,11 +200,12 @@ int intel_gt_resume(struct intel_gt *gt)
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fixup the user contexts on their first pin.
         */
+       gt_sanitize(gt, true);
+
        intel_gt_pm_get(gt);
 
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
        intel_rc6_sanitize(&gt->rc6);
-       gt_sanitize(gt, true);
        if (intel_gt_is_wedged(gt)) {
                err = -EIO;
                goto out_fw;
@@ -204,7 +214,7 @@ int intel_gt_resume(struct intel_gt *gt)
        /* Only when the HW is re-initialised, can we replay the requests */
        err = intel_gt_init_hw(gt);
        if (err) {
-               dev_err(gt->i915->drm.dev,
+               drm_err(&gt->i915->drm,
                        "Failed to initialize GPU, declaring it wedged!\n");
                goto err_wedged;
        }
@@ -220,7 +230,7 @@ int intel_gt_resume(struct intel_gt *gt)
 
                intel_engine_pm_put(engine);
                if (err) {
-                       dev_err(gt->i915->drm.dev,
+                       drm_err(&gt->i915->drm,
                                "Failed to restart %s (%d)\n",
                                engine->name, err);
                        goto err_wedged;
@@ -324,6 +334,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
 {
        GT_TRACE(gt, "\n");
        intel_gt_init_swizzling(gt);
+       intel_ggtt_restore_fences(gt->ggtt);
 
        return intel_uc_runtime_resume(&gt->uc);
 }
index 24c99d0838af6e23e7eb6295139b852d02ec538c..16ff47c83bd577f5764bccea4a94dfa062c8f460 100644 (file)
@@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl)
        return !i915_active_fence_isset(&tl->last_request);
 }
 
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+       return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
 static bool flush_submission(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
@@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt)
 
        for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);
-               active |= flush_work(&engine->retire_work);
-               active |= flush_work(&engine->wakeref.work);
+
+               /* Flush the background retirement and idle barriers */
+               flush_work(&engine->retire_work);
+               flush_delayed_work(&engine->wakeref.work);
+
+               /* Is the idle barrier still outstanding? */
+               active |= engine_active(engine);
        }
 
        return active;
@@ -162,7 +172,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
                        }
                }
 
-               if (!retire_requests(tl) || flush_submission(gt))
+               if (!retire_requests(tl))
                        active_count++;
                mutex_unlock(&tl->mutex);
 
@@ -173,7 +183,6 @@ out_active: spin_lock(&timelines->lock);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);
 
-
                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(atomic_read(&tl->active_count));
@@ -185,6 +194,9 @@ out_active: spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
+       if (flush_submission(gt)) /* Wait, there's more! */
+               active_count++;
+
        return active_count ? timeout : 0;
 }
 
index 96890dd12b5feaded292dd5afba837de028c7891..0cc1d6b185dc1f63c42626041e629b138d675b1a 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "i915_vma.h"
 #include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
 #include "intel_llc_types.h"
 #include "intel_reset_types.h"
 #include "intel_rc6_types.h"
@@ -61,6 +62,7 @@ struct intel_gt {
        struct list_head closed_vma;
        spinlock_t closed_lock; /* guards the list of closed_vma */
 
+       ktime_t last_init_time;
        struct intel_reset reset;
 
        /**
@@ -72,14 +74,12 @@ struct intel_gt {
         */
        intel_wakeref_t awake;
 
+       u32 clock_frequency;
+
        struct intel_llc llc;
        struct intel_rc6 rc6;
        struct intel_rps rps;
 
-       ktime_t last_init_time;
-
-       struct i915_vma *scratch;
-
        spinlock_t irq_lock;
        u32 gt_imr;
        u32 pm_ier;
@@ -97,6 +97,18 @@ struct intel_gt {
         * Reserved for exclusive use by the kernel.
         */
        struct i915_address_space *vm;
+
+       /*
+        * A pool of objects to use as shadow copies of client batch buffers
+        * when the command parser is enabled. Prevents the client from
+        * modifying the batch contents after software parsing.
+        *
+        * Buffers older than 1s are periodically reaped from the pool,
+        * or may be reclaimed by the shrinker before then.
+        */
+       struct intel_gt_buffer_pool buffer_pool;
+
+       struct i915_vma *scratch;
 };
 
 enum intel_gt_scratch_field {
index b3116fe8d1807dd09e5a3e54076e1c7105a3ea9a..d93ebdf3fa0ef81d986520ce2d8a5de6db2ba3bd 100644 (file)
@@ -26,7 +26,6 @@
 #include <drm/drm_mm.h>
 
 #include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
 #include "i915_selftest.h"
 #include "i915_vma_types.h"
 
@@ -135,6 +134,8 @@ typedef u64 gen8_pte_t;
 #define GEN8_PDE_IPS_64K BIT(11)
 #define GEN8_PDE_PS_2M   BIT(7)
 
+struct i915_fence_reg;
+
 #define for_each_sgt_daddr(__dp, __iter, __sgt) \
        __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
 
@@ -333,7 +334,7 @@ struct i915_ggtt {
        u32 pin_bias;
 
        unsigned int num_fences;
-       struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+       struct i915_fence_reg *fence_regs;
        struct list_head fence_list;
 
        /**
index 2dfaddb8811edf863a1cec02bdefcebfd8941dd0..87e6c5bdd2dcef7e94a7e4095ad7e5b8438c5158 100644 (file)
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"
 
 #define RING_EXECLIST_QFULL            (1 << 0x2)
 #define RING_EXECLIST1_VALID           (1 << 0x3)
@@ -216,7 +217,7 @@ struct virtual_engine {
 
        /* And finally, which physical engines this virtual engine maps onto. */
        unsigned int num_siblings;
-       struct intel_engine_cs *siblings[0];
+       struct intel_engine_cs *siblings[];
 };
 
 static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
@@ -238,6 +239,123 @@ __execlists_update_reg_state(const struct intel_context *ce,
                             const struct intel_engine_cs *engine,
                             u32 head);
 
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(engine->i915) >= 12)
+               return 0x60;
+       else if (INTEL_GEN(engine->i915) >= 9)
+               return 0x54;
+       else if (engine->class == RENDER_CLASS)
+               return 0x58;
+       else
+               return -1;
+}
+
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(engine->i915) >= 12)
+               return 0x74;
+       else if (INTEL_GEN(engine->i915) >= 9)
+               return 0x68;
+       else if (engine->class == RENDER_CLASS)
+               return 0xd8;
+       else
+               return -1;
+}
+
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(engine->i915) >= 12)
+               return 0x12;
+       else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+               return 0x18;
+       else
+               return -1;
+}
+
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
+{
+       int x;
+
+       x = lrc_ring_wa_bb_per_ctx(engine);
+       if (x < 0)
+               return x;
+
+       return x + 2;
+}
+
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
+{
+       int x;
+
+       x = lrc_ring_indirect_ptr(engine);
+       if (x < 0)
+               return x;
+
+       return x + 2;
+}
+
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
+{
+       if (engine->class != RENDER_CLASS)
+               return -1;
+
+       if (INTEL_GEN(engine->i915) >= 12)
+               return 0xb6;
+       else if (INTEL_GEN(engine->i915) >= 11)
+               return 0xaa;
+       else
+               return -1;
+}
+
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
+{
+       switch (INTEL_GEN(engine->i915)) {
+       default:
+               MISSING_CASE(INTEL_GEN(engine->i915));
+               fallthrough;
+       case 12:
+               return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+       case 11:
+               return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+       case 10:
+               return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+       case 9:
+               return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+       case 8:
+               return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+       }
+}
+
+static void
+lrc_ring_setup_indirect_ctx(u32 *regs,
+                           const struct intel_engine_cs *engine,
+                           u32 ctx_bb_ggtt_addr,
+                           u32 size)
+{
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+       GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+       regs[lrc_ring_indirect_ptr(engine) + 1] =
+               ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
+
+       GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+       regs[lrc_ring_indirect_offset(engine) + 1] =
+               lrc_ring_indirect_offset_default(engine) << 6;
+}
+
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+       /*
+        * We can use either ppHWSP[16] which is recorded before the context
+        * switch (and so excludes the cost of context switches) or use the
+        * value from the context image itself, which is saved/restored earlier
+        * and so includes the cost of the save.
+        */
+       return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
 static void mark_eio(struct i915_request *rq)
 {
        if (i915_request_completed(rq))
@@ -311,18 +429,7 @@ static int effective_prio(const struct i915_request *rq)
        if (i915_request_has_nopreempt(rq))
                prio = I915_PRIORITY_UNPREEMPTABLE;
 
-       /*
-        * On unwinding the active request, we give it a priority bump
-        * if it has completed waiting on any semaphore. If we know that
-        * the request has already started, we can prevent an unwanted
-        * preempt-to-idle cycle by taking that into account now.
-        */
-       if (__i915_request_has_started(rq))
-               prio |= I915_PRIORITY_NOSEMAPHORE;
-
-       /* Restrict mere WAIT boosts from triggering preemption */
-       BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
-       return prio | __NO_PREEMPTION;
+       return prio;
 }
 
 static int queue_prio(const struct intel_engine_execlists *execlists)
@@ -489,7 +596,7 @@ static void set_offsets(u32 *regs,
 #define REG16(x) \
        (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
        (((x) >> 2) & 0x7f)
-#define END(x) 0, (x)
+#define END(total_state_size) 0, (total_state_size)
 {
        const u32 base = engine->mmio_base;
 
@@ -512,7 +619,7 @@ static void set_offsets(u32 *regs,
                if (flags & POSTED)
                        *regs |= MI_LRI_FORCE_POSTED;
                if (INTEL_GEN(engine->i915) >= 11)
-                       *regs |= MI_LRI_CS_MMIO;
+                       *regs |= MI_LRI_LRM_CS_MMIO;
                regs++;
 
                GEM_BUG_ON(!count);
@@ -897,8 +1004,63 @@ static const u8 gen12_rcs_offsets[] = {
        NOP(6),
        LRI(1, 0),
        REG(0x0c8),
+       NOP(3 + 9 + 1),
+
+       LRI(51, POSTED),
+       REG16(0x588),
+       REG16(0x588),
+       REG16(0x588),
+       REG16(0x588),
+       REG16(0x588),
+       REG16(0x588),
+       REG(0x028),
+       REG(0x09c),
+       REG(0x0c0),
+       REG(0x178),
+       REG(0x17c),
+       REG16(0x358),
+       REG(0x170),
+       REG(0x150),
+       REG(0x154),
+       REG(0x158),
+       REG16(0x41c),
+       REG16(0x600),
+       REG16(0x604),
+       REG16(0x608),
+       REG16(0x60c),
+       REG16(0x610),
+       REG16(0x614),
+       REG16(0x618),
+       REG16(0x61c),
+       REG16(0x620),
+       REG16(0x624),
+       REG16(0x628),
+       REG16(0x62c),
+       REG16(0x630),
+       REG16(0x634),
+       REG16(0x638),
+       REG16(0x63c),
+       REG16(0x640),
+       REG16(0x644),
+       REG16(0x648),
+       REG16(0x64c),
+       REG16(0x650),
+       REG16(0x654),
+       REG16(0x658),
+       REG16(0x65c),
+       REG16(0x660),
+       REG16(0x664),
+       REG16(0x668),
+       REG16(0x66c),
+       REG16(0x670),
+       REG16(0x674),
+       REG16(0x678),
+       REG16(0x67c),
+       REG(0x068),
+       REG(0x084),
+       NOP(1),
 
-       END(80)
+       END(192)
 };
 
 #undef END
@@ -1026,17 +1188,14 @@ static void intel_engine_context_in(struct intel_engine_cs *engine)
 {
        unsigned long flags;
 
-       if (READ_ONCE(engine->stats.enabled) == 0)
+       if (atomic_add_unless(&engine->stats.active, 1, 0))
                return;
 
        write_seqlock_irqsave(&engine->stats.lock, flags);
-
-       if (engine->stats.enabled > 0) {
-               if (engine->stats.active++ == 0)
-                       engine->stats.start = ktime_get();
-               GEM_BUG_ON(engine->stats.active == 0);
+       if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
+               engine->stats.start = ktime_get();
+               atomic_inc(&engine->stats.active);
        }
-
        write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
@@ -1044,51 +1203,20 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
 {
        unsigned long flags;
 
-       if (READ_ONCE(engine->stats.enabled) == 0)
+       GEM_BUG_ON(!atomic_read(&engine->stats.active));
+
+       if (atomic_add_unless(&engine->stats.active, -1, 1))
                return;
 
        write_seqlock_irqsave(&engine->stats.lock, flags);
-
-       if (engine->stats.enabled > 0) {
-               ktime_t last;
-
-               if (engine->stats.active && --engine->stats.active == 0) {
-                       /*
-                        * Decrement the active context count and in case GPU
-                        * is now idle add up to the running total.
-                        */
-                       last = ktime_sub(ktime_get(), engine->stats.start);
-
-                       engine->stats.total = ktime_add(engine->stats.total,
-                                                       last);
-               } else if (engine->stats.active == 0) {
-                       /*
-                        * After turning on engine stats, context out might be
-                        * the first event in which case we account from the
-                        * time stats gathering was turned on.
-                        */
-                       last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
-                       engine->stats.total = ktime_add(engine->stats.total,
-                                                       last);
-               }
+       if (atomic_dec_and_test(&engine->stats.active)) {
+               engine->stats.total =
+                       ktime_add(engine->stats.total,
+                                 ktime_sub(ktime_get(), engine->stats.start));
        }
-
        write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
-       if (INTEL_GEN(engine->i915) >= 12)
-               return 0x60;
-       else if (INTEL_GEN(engine->i915) >= 9)
-               return 0x54;
-       else if (engine->class == RENDER_CLASS)
-               return 0x58;
-       else
-               return -1;
-}
-
 static void
 execlists_check_context(const struct intel_context *ce,
                        const struct intel_engine_cs *engine)
@@ -1132,14 +1260,12 @@ execlists_check_context(const struct intel_context *ce,
 static void restore_default_state(struct intel_context *ce,
                                  struct intel_engine_cs *engine)
 {
-       u32 *regs = ce->lrc_reg_state;
+       u32 *regs;
 
-       if (engine->pinned_default_state)
-               memcpy(regs, /* skip restoring the vanilla PPHWSP */
-                      engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
-                      engine->context_size - PAGE_SIZE);
+       regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
+       execlists_init_reg_state(regs, ce, engine, ce->ring, true);
 
-       execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+       ce->runtime.last = intel_context_get_runtime(ce);
 }
 
 static void reset_active(struct i915_request *rq,
@@ -1181,17 +1307,6 @@ static void reset_active(struct i915_request *rq,
        ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 }
 
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
-       /*
-        * We can use either ppHWSP[16] which is recorded before the context
-        * switch (and so excludes the cost of context switches) or use the
-        * value from the context image itself, which is saved/restored earlier
-        * and so includes the cost of the save.
-        */
-       return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
 static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
 {
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -1243,7 +1358,7 @@ __execlists_schedule_in(struct i915_request *rq)
                ce->lrc.ccid = ce->tag;
        } else {
                /* We don't need a strict matching tag, just different values */
-               unsigned int tag = ffs(engine->context_tag);
+               unsigned int tag = ffs(READ_ONCE(engine->context_tag));
 
                GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
                clear_bit(tag - 1, &engine->context_tag);
@@ -1417,6 +1532,24 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
        }
 }
 
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+       if (!rq)
+               return "";
+
+       snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+                prefix,
+                rq->context->lrc.ccid,
+                rq->fence.context, rq->fence.seqno,
+                i915_request_completed(rq) ? "!" :
+                i915_request_started(rq) ? "*" :
+                "",
+                rq_prio(rq));
+
+       return buf;
+}
+
 static __maybe_unused void
 trace_ports(const struct intel_engine_execlists *execlists,
            const char *msg,
@@ -1424,18 +1557,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
 {
        const struct intel_engine_cs *engine =
                container_of(execlists, typeof(*engine), execlists);
+       char __maybe_unused p0[40], p1[40];
 
        if (!ports[0])
                return;
 
-       ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
-                    ports[0]->fence.context,
-                    ports[0]->fence.seqno,
-                    i915_request_completed(ports[0]) ? "!" :
-                    i915_request_started(ports[0]) ? "*" :
-                    "",
-                    ports[1] ? ports[1]->fence.context : 0,
-                    ports[1] ? ports[1]->fence.seqno : 0);
+       ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+                    dump_port(p0, sizeof(p0), "", ports[0]),
+                    dump_port(p1, sizeof(p1), ", ", ports[1]));
 }
 
 static inline bool
@@ -1448,9 +1577,12 @@ static __maybe_unused bool
 assert_pending_valid(const struct intel_engine_execlists *execlists,
                     const char *msg)
 {
+       struct intel_engine_cs *engine =
+               container_of(execlists, typeof(*engine), execlists);
        struct i915_request * const *port, *rq;
        struct intel_context *ce = NULL;
        bool sentinel = false;
+       u32 ccid = -1;
 
        trace_ports(execlists, msg, execlists->pending);
 
@@ -1459,13 +1591,14 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
                return true;
 
        if (!execlists->pending[0]) {
-               GEM_TRACE_ERR("Nothing pending for promotion!\n");
+               GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+                             engine->name);
                return false;
        }
 
        if (execlists->pending[execlists_num_ports(execlists)]) {
-               GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
-                             execlists_num_ports(execlists));
+               GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+                             engine->name, execlists_num_ports(execlists));
                return false;
        }
 
@@ -1477,20 +1610,31 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
                GEM_BUG_ON(!i915_request_is_active(rq));
 
                if (ce == rq->context) {
-                       GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+                       GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        return false;
                }
                ce = rq->context;
 
+               if (ccid == ce->lrc.ccid) {
+                       GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+                                     engine->name,
+                                     ccid, ce->timeline->fence_context,
+                                     port - execlists->pending);
+                       return false;
+               }
+               ccid = ce->lrc.ccid;
+
                /*
                 * Sentinels are supposed to be lonely so they flush the
                 * current exection off the HW. Check that they are the
                 * only request in the pending submission.
                 */
                if (sentinel) {
-                       GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+                       GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        return false;
@@ -1498,7 +1642,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 
                sentinel = i915_request_has_sentinel(rq);
                if (sentinel && port != execlists->pending) {
-                       GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+                       GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        return false;
@@ -1513,7 +1658,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 
                if (i915_active_is_idle(&ce->active) &&
                    !intel_context_is_barrier(ce)) {
-                       GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+                       GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        ok = false;
@@ -1521,7 +1667,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
                }
 
                if (!i915_vma_is_pinned(ce->state)) {
-                       GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+                       GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        ok = false;
@@ -1529,7 +1676,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
                }
 
                if (!i915_vma_is_pinned(ce->ring->vma)) {
-                       GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+                       GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+                                     engine->name,
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
                        ok = false;
@@ -1664,30 +1812,16 @@ static bool virtual_matches(const struct virtual_engine *ve,
        return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-                                    struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
 {
-       struct intel_engine_cs *old = ve->siblings[0];
-
-       /* All unattached (rq->engine == old) must already be completed */
-
-       spin_lock(&old->breadcrumbs.irq_lock);
-       if (!list_empty(&ve->context.signal_link)) {
-               list_del_init(&ve->context.signal_link);
-
-               /*
-                * We cannot acquire the new engine->breadcrumbs.irq_lock
-                * (as we are holding a breadcrumbs.irq_lock already),
-                * so attach this request to the signaler on submission.
-                * The queued irq_work will occur when we finally drop
-                * the engine->active.lock after dequeue.
-                */
-               set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
-               /* Also transfer the pending irq_work for the old breadcrumb. */
-               intel_engine_signal_breadcrumbs(rq->engine);
-       }
-       spin_unlock(&old->breadcrumbs.irq_lock);
+       /*
+        * All the outstanding signals on ve->siblings[0] must have
+        * been completed, just pending the interrupt handler. As those
+        * signals still refer to the old sibling (via rq->engine), we must
+        * transfer those to the old irq_worker to keep our locking
+        * consistent.
+        */
+       intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
 }
 
 #define for_each_waiter(p__, rq__) \
@@ -1729,7 +1863,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
                                continue;
 
                        /* No waiter should start before its signaler */
-                       GEM_BUG_ON(i915_request_started(w) &&
+                       GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+                                  i915_request_started(w) &&
                                   !i915_request_completed(rq));
 
                        GEM_BUG_ON(i915_request_is_active(w));
@@ -1831,16 +1966,25 @@ static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 
 static void set_timeslice(struct intel_engine_cs *engine)
 {
+       unsigned long duration;
+
        if (!intel_engine_has_timeslices(engine))
                return;
 
-       set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+       duration = active_timeslice(engine);
+       ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+
+       set_timer_ms(&engine->execlists.timer, duration);
 }
 
 static void start_timeslice(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists *execlists = &engine->execlists;
-       int prio = queue_prio(execlists);
+       const int prio = queue_prio(execlists);
+       unsigned long duration;
+
+       if (!intel_engine_has_timeslices(engine))
+               return;
 
        WRITE_ONCE(execlists->switch_priority_hint, prio);
        if (prio == INT_MIN)
@@ -1849,7 +1993,12 @@ static void start_timeslice(struct intel_engine_cs *engine)
        if (timer_pending(&execlists->timer))
                return;
 
-       set_timer_ms(&execlists->timer, timeslice(engine));
+       duration = timeslice(engine);
+       ENGINE_TRACE(engine,
+                    "start timeslicing, prio:%d, interval:%lu",
+                    prio, duration);
+
+       set_timer_ms(&execlists->timer, duration);
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1946,11 +2095,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * of trouble.
         */
        active = READ_ONCE(execlists->active);
-       while ((last = *active) && i915_request_completed(last))
-               active++;
 
-       if (last) {
+       /*
+        * In theory we can skip over completed contexts that have not
+        * yet been processed by events (as those events are in flight):
+        *
+        * while ((last = *active) && i915_request_completed(last))
+        *      active++;
+        *
+        * However, the GPU cannot handle this as it will ultimately
+        * find itself trying to jump back into a context it has just
+        * completed and barf.
+        */
+
+       if ((last = *active)) {
                if (need_preempt(engine, last, rb)) {
+                       if (i915_request_completed(last)) {
+                               tasklet_hi_schedule(&execlists->tasklet);
+                               return;
+                       }
+
                        ENGINE_TRACE(engine,
                                     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
                                     last->fence.context,
@@ -1978,6 +2142,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        last = NULL;
                } else if (need_timeslice(engine, last) &&
                           timeslice_expired(execlists, last)) {
+                       if (i915_request_completed(last)) {
+                               tasklet_hi_schedule(&execlists->tasklet);
+                               return;
+                       }
+
                        ENGINE_TRACE(engine,
                                     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
                                     last->fence.context,
@@ -2087,7 +2256,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                                                        engine);
 
                                if (!list_empty(&ve->context.signals))
-                                       virtual_xfer_breadcrumbs(ve, rq);
+                                       virtual_xfer_breadcrumbs(ve);
 
                                /*
                                 * Move the bound engine to the top of the list
@@ -2246,8 +2415,8 @@ done:
                clear_ports(port + 1, last_port - port);
 
                WRITE_ONCE(execlists->yield, -1);
-               execlists_submit_ports(engine);
                set_preempt_timeout(engine, *active);
+               execlists_submit_ports(engine);
        } else {
 skip_submit:
                ring_set_paused(engine, 0);
@@ -2417,8 +2586,6 @@ static void process_csb(struct intel_engine_cs *engine)
                if (promote) {
                        struct i915_request * const *old = execlists->active;
 
-                       GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-
                        ring_set_paused(engine, 0);
 
                        /* Point active to the new ELSP; prevent overwriting */
@@ -2431,6 +2598,7 @@ static void process_csb(struct intel_engine_cs *engine)
                                execlists_schedule_out(*old++);
 
                        /* switch pending to inflight */
+                       GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
                        memcpy(execlists->inflight,
                               execlists->pending,
                               execlists_num_ports(execlists) *
@@ -2449,16 +2617,20 @@ static void process_csb(struct intel_engine_cs *engine)
                         * We rely on the hardware being strongly
                         * ordered, that the breadcrumb write is
                         * coherent (visible from the CPU) before the
-                        * user interrupt and CSB is processed.
+                        * user interrupt is processed. One might assume
+                        * that the breadcrumb write being before the
+                        * user interrupt and the CS event for the context
+                        * switch would therefore be before the CS event
+                        * itself...
                         */
                        if (GEM_SHOW_DEBUG() &&
-                           !i915_request_completed(*execlists->active) &&
-                           !reset_in_progress(execlists)) {
-                               struct i915_request *rq __maybe_unused =
-                                       *execlists->active;
+                           !i915_request_completed(*execlists->active)) {
+                               struct i915_request *rq = *execlists->active;
                                const u32 *regs __maybe_unused =
                                        rq->context->lrc_reg_state;
 
+                               ENGINE_TRACE(engine,
+                                            "context completed before request!\n");
                                ENGINE_TRACE(engine,
                                             "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
                                             ENGINE_READ(engine, RING_START),
@@ -2478,8 +2650,6 @@ static void process_csb(struct intel_engine_cs *engine)
                                             regs[CTX_RING_START],
                                             regs[CTX_RING_HEAD],
                                             regs[CTX_RING_TAIL]);
-
-                               GEM_BUG_ON("context completed before request");
                        }
 
                        execlists_schedule_out(*execlists->active++);
@@ -2769,6 +2939,45 @@ err_cap:
        return NULL;
 }
 
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+       const struct intel_engine_execlists * const el = &engine->execlists;
+       struct i915_request * const *port, *rq;
+
+       /*
+        * Use the most recent result from process_csb(), but just in case
+        * we trigger an error (via interrupt) before the first CS event has
+        * been written, peek at the next submission.
+        */
+
+       for (port = el->active; (rq = *port); port++) {
+               if (rq->context->lrc.ccid == ccid) {
+                       ENGINE_TRACE(engine,
+                                    "ccid found at active:%zd\n",
+                                    port - el->active);
+                       return rq;
+               }
+       }
+
+       for (port = el->pending; (rq = *port); port++) {
+               if (rq->context->lrc.ccid == ccid) {
+                       ENGINE_TRACE(engine,
+                                    "ccid found at pending:%zd\n",
+                                    port - el->pending);
+                       return rq;
+               }
+       }
+
+       ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+       return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+       return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
 static bool execlists_capture(struct intel_engine_cs *engine)
 {
        struct execlists_capture *cap;
@@ -2786,7 +2995,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
                return true;
 
        spin_lock_irq(&engine->active.lock);
-       cap->rq = execlists_active(&engine->execlists);
+       cap->rq = active_context(engine, active_ccid(engine));
        if (cap->rq) {
                cap->rq = active_request(cap->rq->context->timeline, cap->rq);
                cap->rq = i915_request_get_rcu(cap->rq);
@@ -2934,10 +3143,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
        if (reset_in_progress(execlists))
                return; /* defer until we restart the engine following reset */
 
-       if (execlists->tasklet.func == execlists_submission_tasklet)
-               __execlists_submission_tasklet(engine);
-       else
-               tasklet_hi_schedule(&execlists->tasklet);
+       /* Hopefully we clear execlists->pending[] to let us through */
+       if (READ_ONCE(execlists->pending[0]) &&
+           tasklet_trylock(&execlists->tasklet)) {
+               process_csb(engine);
+               tasklet_unlock(&execlists->tasklet);
+       }
+
+       __execlists_submission_tasklet(engine);
 }
 
 static void submit_queue(struct intel_engine_cs *engine,
@@ -3023,42 +3236,174 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
        vaddr += engine->context_size;
 
        if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
-               dev_err_once(engine->i915->drm.dev,
+               drm_err_once(&engine->i915->drm,
                             "%s context redzone overwritten!\n",
                             engine->name);
 }
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
-       check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+       check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
                      ce->engine);
 
        i915_gem_object_unpin_map(ce->state->obj);
 }
 
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
-                            const struct intel_engine_cs *engine,
-                            u32 head)
-{
-       struct intel_ring *ring = ce->ring;
-       u32 *regs = ce->lrc_reg_state;
+static u32 *
+gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
+{
+       *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+               MI_SRM_LRM_GLOBAL_GTT |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+               CTX_TIMESTAMP * sizeof(u32);
+       *cs++ = 0;
 
-       GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
-       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+       *cs++ = MI_LOAD_REGISTER_REG |
+               MI_LRR_SOURCE_CS_MMIO |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
 
-       regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
-       regs[CTX_RING_HEAD] = head;
-       regs[CTX_RING_TAIL] = ring->tail;
-       regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+       *cs++ = MI_LOAD_REGISTER_REG |
+               MI_LRR_SOURCE_CS_MMIO |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
 
-       /* RPCS */
-       if (engine->class == RENDER_CLASS) {
-               regs[CTX_R_PWR_CLK_STATE] =
-                       intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+       return cs;
+}
+
+static u32 *
+gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
+{
+       GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
+
+       *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+               MI_SRM_LRM_GLOBAL_GTT |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+               (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
+       *cs++ = 0;
+
+       return cs;
+}
+
+static u32 *
+gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+{
+       GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
+
+       *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+               MI_SRM_LRM_GLOBAL_GTT |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+               (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
+       *cs++ = 0;
+
+       *cs++ = MI_LOAD_REGISTER_REG |
+               MI_LRR_SOURCE_CS_MMIO |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+       *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
+
+       return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+{
+       cs = gen12_emit_timestamp_wa(ce, cs);
+       cs = gen12_emit_cmd_buf_wa(ce, cs);
+       cs = gen12_emit_restore_scratch(ce, cs);
+
+       return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+{
+       cs = gen12_emit_timestamp_wa(ce, cs);
+       cs = gen12_emit_restore_scratch(ce, cs);
+
+       return cs;
+}
+
+static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+{
+       return PAGE_SIZE * ce->wa_bb_page;
+}
+
+static u32 *context_indirect_bb(const struct intel_context *ce)
+{
+       void *ptr;
+
+       GEM_BUG_ON(!ce->wa_bb_page);
+
+       ptr = ce->lrc_reg_state;
+       ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+       ptr += context_wa_bb_offset(ce);
+
+       return ptr;
+}
+
+static void
+setup_indirect_ctx_bb(const struct intel_context *ce,
+                     const struct intel_engine_cs *engine,
+                     u32 *(*emit)(const struct intel_context *, u32 *))
+{
+       u32 * const start = context_indirect_bb(ce);
+       u32 *cs;
+
+       cs = emit(ce, start);
+       GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+       while ((unsigned long)cs % CACHELINE_BYTES)
+               *cs++ = MI_NOOP;
+
+       lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
+                                   i915_ggtt_offset(ce->state) +
+                                   context_wa_bb_offset(ce),
+                                   (cs - start) * sizeof(*cs));
+}
+
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+                            const struct intel_engine_cs *engine,
+                            u32 head)
+{
+       struct intel_ring *ring = ce->ring;
+       u32 *regs = ce->lrc_reg_state;
+
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
+       regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+       regs[CTX_RING_HEAD] = head;
+       regs[CTX_RING_TAIL] = ring->tail;
+       regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+
+       /* RPCS */
+       if (engine->class == RENDER_CLASS) {
+               regs[CTX_R_PWR_CLK_STATE] =
+                       intel_sseu_make_rpcs(engine->i915, &ce->sseu);
 
                i915_oa_init_reg_state(ce, engine);
        }
+
+       if (ce->wa_bb_page) {
+               u32 *(*fn)(const struct intel_context *ce, u32 *cs);
+
+               fn = gen12_emit_indirect_ctx_xcs;
+               if (ce->engine->class == RENDER_CLASS)
+                       fn = gen12_emit_indirect_ctx_rcs;
+
+               /* Mutually exclusive wrt to global indirect bb */
+               GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
+               setup_indirect_ctx_bb(ce, engine, fn);
+       }
 }
 
 static int
@@ -3077,7 +3422,7 @@ __execlists_context_pin(struct intel_context *ce,
                return PTR_ERR(vaddr);
 
        ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
-       ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+       ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
        __execlists_update_reg_state(ce, engine, ce->ring->tail);
 
        return 0;
@@ -3125,6 +3470,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
        u32 *cs;
 
+       GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
        if (!i915_request_timeline(rq)->has_initial_breadcrumb)
                return 0;
 
@@ -3151,6 +3497,56 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
        /* Record the updated position of the request's payload */
        rq->infix = intel_ring_offset(rq, cs);
 
+       __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+       return 0;
+}
+
+static int emit_pdps(struct i915_request *rq)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+       int err, i;
+       u32 *cs;
+
+       GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+       /*
+        * Beware ye of the dragons, this sequence is magic!
+        *
+        * Small changes to this sequence can cause anything from
+        * GPU hangs to forcewake errors and machine lockups!
+        */
+
+       /* Flush any residual operations from the context load */
+       err = engine->emit_flush(rq, EMIT_FLUSH);
+       if (err)
+               return err;
+
+       /* Magic required to prevent forcewake errors! */
+       err = engine->emit_flush(rq, EMIT_INVALIDATE);
+       if (err)
+               return err;
+
+       cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Ensure the LRI have landed before we invalidate & continue */
+       *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+       for (i = GEN8_3LVL_PDPES; i--; ) {
+               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+               u32 base = engine->mmio_base;
+
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+               *cs++ = upper_32_bits(pd_daddr);
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+               *cs++ = lower_32_bits(pd_daddr);
+       }
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(rq, cs);
+
        return 0;
 }
 
@@ -3175,6 +3571,12 @@ static int execlists_request_alloc(struct i915_request *request)
         * to cancel/unwind this request now.
         */
 
+       if (!i915_vm_is_4lvl(request->context->vm)) {
+               ret = emit_pdps(request);
+               if (ret)
+                       return ret;
+       }
+
        /* Unconditionally invalidate GPU caches and TLBs. */
        ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
        if (ret)
@@ -3475,7 +3877,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 
        ret = lrc_setup_wa_ctx(engine);
        if (ret) {
-               DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+               drm_dbg(&engine->i915->drm,
+                       "Failed to setup context WA page: %d\n", ret);
                return ret;
        }
 
@@ -3508,6 +3911,72 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        return ret;
 }
 
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       const unsigned int reset_value = execlists->csb_size - 1;
+
+       ring_set_paused(engine, 0);
+
+       /*
+        * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+        * Bludgeon them with a mmio update to be sure.
+        */
+       ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+                    0xffff << 16 | reset_value << 8 | reset_value);
+       ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+       /*
+        * After a reset, the HW starts writing into CSB entry [0]. We
+        * therefore have to set our HEAD pointer back one entry so that
+        * the *first* entry we check is entry 0. To complicate this further,
+        * as we don't wait for the first interrupt after reset, we have to
+        * fake the HW write to point back to the last entry so that our
+        * inline comparison of our cached head position against the last HW
+        * write works even before the first interrupt.
+        */
+       execlists->csb_head = reset_value;
+       WRITE_ONCE(*execlists->csb_write, reset_value);
+       wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+       invalidate_csb_entries(&execlists->csb_status[0],
+                              &execlists->csb_status[reset_value]);
+
+       /* Once more for luck and our trusty paranoia */
+       ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+                    0xffff << 16 | reset_value << 8 | reset_value);
+       ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+       GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+       /*
+        * Poison residual state on resume, in case the suspend didn't!
+        *
+        * We have to assume that across suspend/resume (or other loss
+        * of control) that the contents of our pinned buffers has been
+        * lost, replaced by garbage. Since this doesn't always happen,
+        * let's poison such state so that we more quickly spot when
+        * we falsely assume it has been preserved.
+        */
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+       reset_csb_pointers(engine);
+
+       /*
+        * The kernel_context HWSP is stored in the status_page. As above,
+        * that may be lost on resume/initialisation, and so we need to
+        * reset the value in the HWSP.
+        */
+       intel_timeline_reset_seqno(engine->kernel_context->timeline);
+
+       /* And scrub the dirty cachelines for the HWSP */
+       clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
 static void enable_error_interrupt(struct intel_engine_cs *engine)
 {
        u32 status;
@@ -3518,7 +3987,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
 
        status = ENGINE_READ(engine, RING_ESR);
        if (unlikely(status)) {
-               dev_err(engine->i915->drm.dev,
+               drm_err(&engine->i915->drm,
                        "engine '%s' resumed still in error: %08x\n",
                        engine->name, status);
                __intel_gt_reset(engine->gt, engine->mask);
@@ -3582,7 +4051,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
        bool unexpected = false;
 
        if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
-               DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+               drm_dbg(&engine->i915->drm,
+                       "STOP_RING still set in RING_MI_MODE\n");
                unexpected = true;
        }
 
@@ -3642,39 +4112,10 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
         *
         * FIXME: Wa for more modern gens needs to be validated
         */
+       ring_set_paused(engine, 1);
        intel_engine_stop_cs(engine);
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       const unsigned int reset_value = execlists->csb_size - 1;
-
-       ring_set_paused(engine, 0);
 
-       /*
-        * After a reset, the HW starts writing into CSB entry [0]. We
-        * therefore have to set our HEAD pointer back one entry so that
-        * the *first* entry we check is entry 0. To complicate this further,
-        * as we don't wait for the first interrupt after reset, we have to
-        * fake the HW write to point back to the last entry so that our
-        * inline comparison of our cached head position against the last HW
-        * write works even before the first interrupt.
-        */
-       execlists->csb_head = reset_value;
-       WRITE_ONCE(*execlists->csb_write, reset_value);
-       wmb(); /* Make sure this is visible to HW (paranoia?) */
-
-       /*
-        * Sometimes Icelake forgets to reset its pointers on a GPU reset.
-        * Bludgeon them with a mmio update to be sure.
-        */
-       ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
-                    reset_value << 8 | reset_value);
-       ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
-       invalidate_csb_entries(&execlists->csb_status[0],
-                              &execlists->csb_status[reset_value]);
+       engine->execlists.reset_ccid = active_ccid(engine);
 }
 
 static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
@@ -3717,7 +4158,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
         * its request, it was still running at the time of the
         * reset and will have been clobbered.
         */
-       rq = execlists_active(execlists);
+       rq = active_context(engine, engine->execlists.reset_ccid);
        if (!rq)
                goto unwind;
 
@@ -3767,8 +4208,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
         * image back to the expected values to skip over the guilty request.
         */
        __i915_request_reset(rq, stalled);
-       if (!stalled)
-               goto out_replay;
 
        /*
         * We want a simple context + ring to execute the breadcrumb update.
@@ -3778,9 +4217,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       GEM_BUG_ON(!intel_context_is_pinned(ce));
-       restore_default_state(ce, engine);
-
 out_replay:
        ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
                     head, ce->ring->tail);
@@ -4146,6 +4582,42 @@ static u32 preparser_disable(bool state)
        return MI_ARB_CHECK | 1 << 8 | state;
 }
 
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+       static const i915_reg_t vd[] = {
+               GEN12_VD0_AUX_NV,
+               GEN12_VD1_AUX_NV,
+               GEN12_VD2_AUX_NV,
+               GEN12_VD3_AUX_NV,
+       };
+
+       static const i915_reg_t ve[] = {
+               GEN12_VE0_AUX_NV,
+               GEN12_VE1_AUX_NV,
+       };
+
+       if (engine->class == VIDEO_DECODE_CLASS)
+               return vd[engine->instance];
+
+       if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+               return ve[engine->instance];
+
+       GEM_BUG_ON("unknown aux_inv_reg\n");
+
+       return INVALID_MMIO_REG;
+}
+
+static u32 *
+gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(inv_reg);
+       *cs++ = AUX_INV;
+       *cs++ = MI_NOOP;
+
+       return cs;
+}
+
 static int gen12_emit_flush_render(struct i915_request *request,
                                   u32 mode)
 {
@@ -4154,13 +4626,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
                u32 *cs;
 
                flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_FLUSH_L3;
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /* Wa_1409600907:tgl */
                flags |= PIPE_CONTROL_DEPTH_STALL;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
-               flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
 
                flags |= PIPE_CONTROL_STORE_DATA_INDEX;
                flags |= PIPE_CONTROL_QW_WRITE;
@@ -4171,7 +4643,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
 
-               cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+               cs = gen12_emit_pipe_control(cs,
+                                            PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+                                            flags, LRC_PPHWSP_SCRATCH_ADDR);
                intel_ring_advance(request, cs);
        }
 
@@ -4186,14 +4660,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-               flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
 
                flags |= PIPE_CONTROL_STORE_DATA_INDEX;
                flags |= PIPE_CONTROL_QW_WRITE;
 
                flags |= PIPE_CONTROL_CS_STALL;
 
-               cs = intel_ring_begin(request, 8);
+               cs = intel_ring_begin(request, 8 + 4);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
 
@@ -4206,6 +4679,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
                cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
 
+               /* hsdes: 1809175790 */
+               cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
                *cs++ = preparser_disable(false);
                intel_ring_advance(request, cs);
        }
@@ -4213,6 +4689,56 @@ static int gen12_emit_flush_render(struct i915_request *request,
        return 0;
 }
 
+static int gen12_emit_flush(struct i915_request *request, u32 mode)
+{
+       intel_engine_mask_t aux_inv = 0;
+       u32 cmd, *cs;
+
+       if (mode & EMIT_INVALIDATE)
+               aux_inv = request->engine->mask & ~BIT(BCS0);
+
+       cs = intel_ring_begin(request,
+                             4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0));
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       cmd = MI_FLUSH_DW + 1;
+
+       /* We always require a command barrier so that subsequent
+        * commands, such as breadcrumb interrupts, are strictly ordered
+        * wrt the contents of the write cache being flushed to memory
+        * (and thus being coherent from the CPU).
+        */
+       cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+       if (mode & EMIT_INVALIDATE) {
+               cmd |= MI_INVALIDATE_TLB;
+               if (request->engine->class == VIDEO_DECODE_CLASS)
+                       cmd |= MI_INVALIDATE_BSD;
+       }
+
+       *cs++ = cmd;
+       *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+       *cs++ = 0; /* upper addr */
+       *cs++ = 0; /* value */
+
+       if (aux_inv) { /* hsdes: 1809175790 */
+               struct intel_engine_cs *engine;
+               unsigned int tmp;
+
+               *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+               for_each_engine_masked(engine, request->engine->gt,
+                                      aux_inv, tmp) {
+                       *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+                       *cs++ = AUX_INV;
+               }
+               *cs++ = MI_NOOP;
+       }
+       intel_ring_advance(request, cs);
+
+       return 0;
+}
+
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -4242,8 +4768,7 @@ static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
 }
 
 static __always_inline u32*
-gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
-                                u32 *cs)
+gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 {
        *cs++ = MI_USER_INTERRUPT;
 
@@ -4257,14 +4782,16 @@ gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
        return gen8_emit_wa_tail(request, cs);
 }
 
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
 {
-       cs = gen8_emit_ggtt_write(cs,
-                                 request->fence.seqno,
-                                 i915_request_active_timeline(request)->hwsp_offset,
-                                 0);
+       u32 addr = i915_request_active_timeline(request)->hwsp_offset;
 
-       return gen8_emit_fini_breadcrumb_footer(request, cs);
+       return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+}
+
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
 }
 
 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
@@ -4282,7 +4809,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
                                      PIPE_CONTROL_FLUSH_ENABLE |
                                      PIPE_CONTROL_CS_STALL);
 
-       return gen8_emit_fini_breadcrumb_footer(request, cs);
+       return gen8_emit_fini_breadcrumb_tail(request, cs);
 }
 
 static u32 *
@@ -4298,7 +4825,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
                                      PIPE_CONTROL_DC_FLUSH_ENABLE |
                                      PIPE_CONTROL_FLUSH_ENABLE);
 
-       return gen8_emit_fini_breadcrumb_footer(request, cs);
+       return gen8_emit_fini_breadcrumb_tail(request, cs);
 }
 
 /*
@@ -4336,7 +4863,7 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
 }
 
 static __always_inline u32*
-gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 {
        *cs++ = MI_USER_INTERRUPT;
 
@@ -4350,33 +4877,29 @@ gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
        return gen8_emit_wa_tail(request, cs);
 }
 
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-       cs = gen8_emit_ggtt_write(cs,
-                                 request->fence.seqno,
-                                 i915_request_active_timeline(request)->hwsp_offset,
-                                 0);
-
-       return gen12_emit_fini_breadcrumb_footer(request, cs);
+       return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
 }
 
 static u32 *
 gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
-       cs = gen8_emit_ggtt_write_rcs(cs,
-                                     request->fence.seqno,
-                                     i915_request_active_timeline(request)->hwsp_offset,
-                                     PIPE_CONTROL_CS_STALL |
-                                     PIPE_CONTROL_TILE_CACHE_FLUSH |
-                                     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
-                                     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-                                     /* Wa_1409600907:tgl */
-                                     PIPE_CONTROL_DEPTH_STALL |
-                                     PIPE_CONTROL_DC_FLUSH_ENABLE |
-                                     PIPE_CONTROL_FLUSH_ENABLE |
-                                     PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+       cs = gen12_emit_ggtt_write_rcs(cs,
+                                      request->fence.seqno,
+                                      i915_request_active_timeline(request)->hwsp_offset,
+                                      PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+                                      PIPE_CONTROL_CS_STALL |
+                                      PIPE_CONTROL_TILE_CACHE_FLUSH |
+                                      PIPE_CONTROL_FLUSH_L3 |
+                                      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+                                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                      /* Wa_1409600907:tgl */
+                                      PIPE_CONTROL_DEPTH_STALL |
+                                      PIPE_CONTROL_DC_FLUSH_ENABLE |
+                                      PIPE_CONTROL_FLUSH_ENABLE);
 
-       return gen12_emit_fini_breadcrumb_footer(request, cs);
+       return gen12_emit_fini_breadcrumb_tail(request, cs);
 }
 
 static void execlists_park(struct intel_engine_cs *engine)
@@ -4428,6 +4951,8 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
 
 static void execlists_release(struct intel_engine_cs *engine)
 {
+       engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
        execlists_shutdown(engine);
 
        intel_engine_cleanup_common(engine);
@@ -4447,9 +4972,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->emit_flush = gen8_emit_flush;
        engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
        engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (INTEL_GEN(engine->i915) >= 12) {
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
-
+               engine->emit_flush = gen12_emit_flush;
+       }
        engine->set_default_submission = intel_execlists_set_default_submission;
 
        if (INTEL_GEN(engine->i915) < 11) {
@@ -4530,7 +5056,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
                 * because we only expect rare glitches but nothing
                 * critical to prevent us from using GPU
                 */
-               DRM_ERROR("WA batch buffer initialization failed\n");
+               drm_err(&i915->drm, "WA batch buffer initialization failed\n");
 
        if (HAS_LOGICAL_RING_ELSQ(i915)) {
                execlists->submit_reg = uncore->regs +
@@ -4558,48 +5084,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
                execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
        }
 
-       reset_csb_pointers(engine);
-
        /* Finally, take ownership and responsibility for cleanup! */
+       engine->sanitize = execlists_sanitize;
        engine->release = execlists_release;
 
        return 0;
 }
 
-static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
-{
-       u32 indirect_ctx_offset;
-
-       switch (INTEL_GEN(engine->i915)) {
-       default:
-               MISSING_CASE(INTEL_GEN(engine->i915));
-               /* fall through */
-       case 12:
-               indirect_ctx_offset =
-                       GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-               break;
-       case 11:
-               indirect_ctx_offset =
-                       GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-               break;
-       case 10:
-               indirect_ctx_offset =
-                       GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-               break;
-       case 9:
-               indirect_ctx_offset =
-                       GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-               break;
-       case 8:
-               indirect_ctx_offset =
-                       GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-               break;
-       }
-
-       return indirect_ctx_offset;
-}
-
-
 static void init_common_reg_state(u32 * const regs,
                                  const struct intel_engine_cs *engine,
                                  const struct intel_ring *ring,
@@ -4617,30 +5108,27 @@ static void init_common_reg_state(u32 * const regs,
        regs[CTX_CONTEXT_CONTROL] = ctl;
 
        regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+       regs[CTX_TIMESTAMP] = 0;
 }
 
 static void init_wa_bb_reg_state(u32 * const regs,
-                                const struct intel_engine_cs *engine,
-                                u32 pos_bb_per_ctx)
+                                const struct intel_engine_cs *engine)
 {
        const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
 
        if (wa_ctx->per_ctx.size) {
                const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 
-               regs[pos_bb_per_ctx] =
+               GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+               regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
                        (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
        }
 
        if (wa_ctx->indirect_ctx.size) {
-               const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
-               regs[pos_bb_per_ctx + 2] =
-                       (ggtt_offset + wa_ctx->indirect_ctx.offset) |
-                       (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
-               regs[pos_bb_per_ctx + 4] =
-                       intel_lr_indirect_ctx_offset(engine) << 6;
+               lrc_ring_setup_indirect_ctx(regs, engine,
+                                           i915_ggtt_offset(wa_ctx->vma) +
+                                           wa_ctx->indirect_ctx.offset,
+                                           wa_ctx->indirect_ctx.size);
        }
 }
 
@@ -4689,10 +5177,7 @@ static void execlists_init_reg_state(u32 *regs,
        init_common_reg_state(regs, engine, ring, inhibit);
        init_ppgtt_reg_state(regs, vm_alias(ce->vm));
 
-       init_wa_bb_reg_state(regs, engine,
-                            INTEL_GEN(engine->i915) >= 12 ?
-                            GEN12_CTX_BB_PER_CTX_PTR :
-                            CTX_BB_PER_CTX_PTR);
+       init_wa_bb_reg_state(regs, engine);
 
        __reset_stop_ring(regs, engine);
 }
@@ -4705,29 +5190,18 @@ populate_lr_context(struct intel_context *ce,
 {
        bool inhibit = true;
        void *vaddr;
-       int ret;
 
        vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
-               ret = PTR_ERR(vaddr);
-               DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
-               return ret;
+               drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
+               return PTR_ERR(vaddr);
        }
 
        set_redzone(vaddr, engine);
 
        if (engine->default_state) {
-               void *defaults;
-
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (IS_ERR(defaults)) {
-                       ret = PTR_ERR(defaults);
-                       goto err_unpin_ctx;
-               }
-
-               memcpy(vaddr, defaults, engine->context_size);
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_read(engine->default_state, 0,
+                          vaddr, engine->context_size);
                __set_bit(CONTEXT_VALID_BIT, &ce->flags);
                inhibit = false;
        }
@@ -4739,14 +5213,12 @@ populate_lr_context(struct intel_context *ce,
         * The second page of the context object contains some registers which
         * must be set up prior to the first execution.
         */
-       execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+       execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
                                 ce, engine, ring, inhibit);
 
-       ret = 0;
-err_unpin_ctx:
        __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
        i915_gem_object_unpin_map(ctx_obj);
-       return ret;
+       return 0;
 }
 
 static int __execlists_context_alloc(struct intel_context *ce,
@@ -4764,6 +5236,11 @@ static int __execlists_context_alloc(struct intel_context *ce,
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                context_size += I915_GTT_PAGE_SIZE; /* for redzone */
 
+       if (INTEL_GEN(engine->i915) == 12) {
+               ce->wa_bb_page = context_size / PAGE_SIZE;
+               context_size += PAGE_SIZE;
+       }
+
        ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
        if (IS_ERR(ctx_obj))
                return PTR_ERR(ctx_obj);
@@ -4803,7 +5280,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
 
        ret = populate_lr_context(ce, ctx_obj, engine, ring);
        if (ret) {
-               DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+               drm_dbg(&engine->i915->drm,
+                       "Failed to populate LRC: %d\n", ret);
                goto error_ring_free;
        }
 
@@ -4856,6 +5334,8 @@ static void virtual_context_destroy(struct kref *kref)
                __execlists_context_fini(&ve->context);
        intel_context_fini(&ve->context);
 
+       intel_engine_free_request_pool(&ve->base);
+
        kfree(ve->bonds);
        kfree(ve);
 }
@@ -4980,12 +5460,15 @@ static void virtual_submission_tasklet(unsigned long data)
                return;
 
        local_irq_disable();
-       for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
-               struct intel_engine_cs *sibling = ve->siblings[n];
+       for (n = 0; n < ve->num_siblings; n++) {
+               struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
                struct ve_node * const node = &ve->nodes[sibling->id];
                struct rb_node **parent, *rb;
                bool first;
 
+               if (!READ_ONCE(ve->request))
+                       break; /* already handled by a sibling's tasklet */
+
                if (unlikely(!(mask & sibling->mask))) {
                        if (!RB_EMPTY_NODE(&node->rb)) {
                                spin_lock(&sibling->active.lock);
@@ -5036,10 +5519,8 @@ static void virtual_submission_tasklet(unsigned long data)
 submit_engine:
                GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
                node->prio = prio;
-               if (first && prio > sibling->execlists.queue_priority_hint) {
-                       sibling->execlists.queue_priority_hint = prio;
+               if (first && prio > sibling->execlists.queue_priority_hint)
                        tasklet_hi_schedule(&sibling->execlists.tasklet);
-               }
 
                spin_unlock(&sibling->active.lock);
        }
index dfbc214e14f5feab5b634876fc55c3c675d24c5f..91fd8e452d9bb12d9df65092cb16561fc47e543e 100644 (file)
@@ -90,6 +90,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
 #define LRC_PPHWSP_SZ  (1)
 /* After the PPHWSP we have the logical state for the context */
 #define LRC_STATE_PN   (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)
 
 /* Space within PPHWSP reserved to be used as scratch */
 #define LRC_PPHWSP_SCRATCH             0x34
index d39b72590e40a0be5890f76ecf9ea494a3f46eec..93cb6c460508b548b4f1788696a9ca2ab8380d61 100644 (file)
@@ -9,14 +9,13 @@
 
 #include <linux/types.h>
 
-/* GEN8 to GEN11 Reg State Context */
+/* GEN8 to GEN12 Reg State Context */
 #define CTX_CONTEXT_CONTROL            (0x02 + 1)
 #define CTX_RING_HEAD                  (0x04 + 1)
 #define CTX_RING_TAIL                  (0x06 + 1)
 #define CTX_RING_START                 (0x08 + 1)
 #define CTX_RING_CTL                   (0x0a + 1)
 #define CTX_BB_STATE                   (0x10 + 1)
-#define CTX_BB_PER_CTX_PTR             (0x18 + 1)
 #define CTX_TIMESTAMP                  (0x22 + 1)
 #define CTX_PDP3_UDW                   (0x24 + 1)
 #define CTX_PDP3_LDW                   (0x26 + 1)
@@ -30,9 +29,6 @@
 
 #define GEN9_CTX_RING_MI_MODE          0x54
 
-/* GEN12+ Reg State Context */
-#define GEN12_CTX_BB_PER_CTX_PTR               (0x12 + 1)
-
 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
        u32 *reg_state__ = (reg_state); \
        const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
index 3847ee44b18199e13a547e40b19d8b32cfbe41c3..ab675d35030d7fb5c47c4fec5a3de09cddd21e2e 100644 (file)
@@ -113,7 +113,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
        struct intel_uncore *uncore = rc6_to_uncore(rc6);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       u32 rc6_mode;
 
        /* 2b: Program RC6 thresholds.*/
        if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
@@ -165,16 +164,11 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
        /* 3a: Enable RC6 */
        set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
-       /* WaRsUseTimeoutMode:cnl (pre-prod) */
-       if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
-               rc6_mode = GEN7_RC_CTL_TO_MODE;
-       else
-               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
 
        rc6->ctl_enable =
                GEN6_RC_CTL_HW_ENABLE |
                GEN6_RC_CTL_RC6_ENABLE |
-               rc6_mode;
+               GEN6_RC_CTL_EI_MODE(1);
 
        /*
         * WaRsDisableCoarsePowerGating:skl,cnl
@@ -246,16 +240,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
        ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
                                     &rc6vids, NULL);
        if (IS_GEN(i915, 6) && ret) {
-               DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+               drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
        } else if (IS_GEN(i915, 6) &&
                   (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
-               DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
-                                GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+               drm_dbg(&i915->drm,
+                       "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+                       GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
                rc6vids &= 0xffff00;
                rc6vids |= GEN6_ENCODE_RC6_VID(450);
                ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
                if (ret)
-                       DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+                       drm_err(&i915->drm,
+                               "Couldn't fix incorrect rc6 voltage\n");
        }
 }
 
@@ -263,14 +259,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 static int chv_rc6_init(struct intel_rc6 *rc6)
 {
        struct intel_uncore *uncore = rc6_to_uncore(rc6);
+       struct drm_i915_private *i915 = rc6_to_i915(rc6);
        resource_size_t pctx_paddr, paddr;
        resource_size_t pctx_size = 32 * SZ_1K;
        u32 pcbr;
 
        pcbr = intel_uncore_read(uncore, VLV_PCBR);
        if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
-               DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-               paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+               drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+               paddr = i915->dsm.end + 1 - pctx_size;
                GEM_BUG_ON(paddr > U32_MAX);
 
                pctx_paddr = (paddr & ~4095);
@@ -304,7 +301,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
                goto out;
        }
 
-       DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+       drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
 
        /*
         * From the Gunit register HAS:
@@ -316,7 +313,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
         */
        pctx = i915_gem_object_create_stolen(i915, pctx_size);
        if (IS_ERR(pctx)) {
-               DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+               drm_dbg(&i915->drm,
+                       "not enough stolen space for PCTX, disabling\n");
                return PTR_ERR(pctx);
        }
 
@@ -398,14 +396,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
        rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
        rc_sw_target &= RC_SW_TARGET_STATE_MASK;
        rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
-       DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+       drm_dbg(&i915->drm, "BIOS enabled RC states: "
                         "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
                         onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
                         onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
                         rc_sw_target);
 
        if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-               DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+               drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
                enable_rc6 = false;
        }
 
@@ -417,7 +415,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
                intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
        if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
              rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
-               DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+               drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
                enable_rc6 = false;
        }
 
@@ -425,24 +423,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
              (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
              (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
              (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
-               DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+               drm_dbg(&i915->drm,
+                       "Engine Idle wait time not set properly.\n");
                enable_rc6 = false;
        }
 
        if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
            !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
            !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
-               DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+               drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
                enable_rc6 = false;
        }
 
        if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
-               DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+               drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
                enable_rc6 = false;
        }
 
        if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
-               DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+               drm_dbg(&i915->drm, "GPM control not setup properly.\n");
                enable_rc6 = false;
        }
 
@@ -463,7 +462,7 @@ static bool rc6_supported(struct intel_rc6 *rc6)
                return false;
 
        if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
-               dev_notice(i915->drm.dev,
+               drm_notice(&i915->drm,
                           "RC6 and powersaving disabled by BIOS\n");
                return false;
        }
@@ -495,7 +494,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6)
        if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
                return false;
 
-       dev_notice(i915->drm.dev,
+       drm_notice(&i915->drm,
                   "RC6 context corruption, disabling runtime power management\n");
        return true;
 }
index 5954ecc3207f21efed541308f2b4158ac296016b..f59e7875cc5ec9255252d6a0f0e7636a5aab0461 100644 (file)
@@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so,
        }
 
        if (rodata->reloc[reloc_index] != -1) {
-               DRM_ERROR("only %d relocs resolved\n", reloc_index);
+               drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
                goto err;
        }
 
@@ -194,7 +194,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
 
        err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (err)
-               goto err_vma;
+               goto err_obj;
 
        err = render_state_setup(so, engine->i915);
        if (err)
@@ -204,8 +204,6 @@ int intel_renderstate_init(struct intel_renderstate *so,
 
 err_unpin:
        i915_vma_unpin(so->vma);
-err_vma:
-       i915_vma_close(so->vma);
 err_obj:
        i915_gem_object_put(obj);
        so->vma = NULL;
@@ -221,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so,
        if (!so->vma)
                return 0;
 
+       i915_vma_lock(so->vma);
+       err = i915_request_await_object(rq, so->vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(so->vma, rq, 0);
+       i915_vma_unlock(so->vma);
+       if (err)
+               return err;
+
        err = engine->emit_bb_start(rq,
                                    so->batch_offset, so->batch_size,
                                    I915_DISPATCH_SECURE);
@@ -235,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
                        return err;
        }
 
-       i915_vma_lock(so->vma);
-       err = i915_request_await_object(rq, so->vma->obj, false);
-       if (err == 0)
-               err = i915_vma_move_to_active(so->vma, rq, 0);
-       i915_vma_unlock(so->vma);
-
-       return err;
+       return 0;
 }
 
 void intel_renderstate_fini(struct intel_renderstate *so)
index 80db3c9d785ed2d413f68df3472a8de15f63c84d..39070b514e65a4a602cb891a4b6a691827b5e4e0 100644 (file)
@@ -109,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq)
                goto out;
        }
 
-       dev_notice(ctx->i915->drm.dev,
+       drm_notice(&ctx->i915->drm,
                   "%s context reset due to GPU hang\n",
                   ctx->name);
 
@@ -755,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
        for_each_engine(engine, gt, id)
                __intel_engine_reset(engine, stalled_mask & engine->mask);
 
-       i915_gem_restore_fences(gt->ggtt);
+       intel_ggtt_restore_fences(gt->ggtt);
 
        return err;
 }
@@ -1031,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt,
                goto unlock;
 
        if (reason)
-               dev_notice(gt->i915->drm.dev,
+               drm_notice(&gt->i915->drm,
                           "Resetting chip for %s\n", reason);
        atomic_inc(&gt->i915->gpu_error.reset_count);
 
@@ -1039,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt,
 
        if (!intel_has_gpu_reset(gt)) {
                if (i915_modparams.reset)
-                       dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
+                       drm_err(&gt->i915->drm, "GPU reset not supported\n");
                else
                        drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
                goto error;
@@ -1049,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt,
                intel_runtime_pm_disable_interrupts(gt->i915);
 
        if (do_reset(gt, stalled_mask)) {
-               dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
+               drm_err(&gt->i915->drm, "Failed to reset chip\n");
                goto taint;
        }
 
@@ -1111,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
 /**
  * intel_engine_reset - reset GPU engine to recover from a hang
  * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
  *
  * Reset a specific GPU engine. Useful if a hang is detected.
  * Returns zero on successful reset or otherwise an error code.
@@ -1136,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
        reset_prepare_engine(engine);
 
        if (msg)
-               dev_notice(engine->i915->drm.dev,
+               drm_notice(&engine->i915->drm,
                           "Resetting %s for %s\n", engine->name, msg);
        atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
 
@@ -1381,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work)
 {
        struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
 
-       dev_err(w->gt->i915->drm.dev,
+       drm_err(&w->gt->i915->drm,
                "%s timed out, cancelling all in-flight rendering.\n",
                w->name);
        intel_gt_set_wedged(w->gt);
index 5bdce24994aa04ef80306a43e180c3599d81ee7c..cc0ebca65167f141a1a61211a1838eec2d1a0e9e 100644 (file)
@@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 static inline void
 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 {
+       unsigned int head = READ_ONCE(ring->head);
+
        GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
 
        /*
@@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
         * into the same cacheline as ring->head.
         */
 #define cacheline(a) round_down(a, CACHELINE_BYTES)
-       GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
-                  tail < ring->head);
+       GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
 #undef cacheline
 }
 
index fdc3f10e12aae5c6fefaa1e59c5a7280ca5cea61..ca7286e584092423a08f0417f7f78429f317f2f8 100644 (file)
@@ -42,6 +42,7 @@
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"
 
 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
@@ -577,8 +578,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
                                    RING_INSTPM(engine->mmio_base),
                                    INSTPM_SYNC_FLUSH, 0,
                                    1000))
-               DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-                         engine->name);
+               drm_err(&dev_priv->drm,
+                       "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                       engine->name);
 }
 
 static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -601,8 +603,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
                                            MODE_IDLE,
                                            MODE_IDLE,
                                            1000)) {
-                       DRM_ERROR("%s : timed out trying to stop ring\n",
-                                 engine->name);
+                       drm_err(&dev_priv->drm,
+                               "%s : timed out trying to stop ring\n",
+                               engine->name);
 
                        /*
                         * Sometimes we observe that the idle flag is not
@@ -661,22 +664,23 @@ static int xcs_resume(struct intel_engine_cs *engine)
        /* WaClearRingBufHeadRegAtInit:ctg,elk */
        if (!stop_ring(engine)) {
                /* G45 ring initialization often fails to reset head to zero */
-               DRM_DEBUG_DRIVER("%s head not reset to zero "
+               drm_dbg(&dev_priv->drm, "%s head not reset to zero "
+                       "ctl %08x head %08x tail %08x start %08x\n",
+                       engine->name,
+                       ENGINE_READ(engine, RING_CTL),
+                       ENGINE_READ(engine, RING_HEAD),
+                       ENGINE_READ(engine, RING_TAIL),
+                       ENGINE_READ(engine, RING_START));
+
+               if (!stop_ring(engine)) {
+                       drm_err(&dev_priv->drm,
+                               "failed to set %s head to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                engine->name,
                                ENGINE_READ(engine, RING_CTL),
                                ENGINE_READ(engine, RING_HEAD),
                                ENGINE_READ(engine, RING_TAIL),
                                ENGINE_READ(engine, RING_START));
-
-               if (!stop_ring(engine)) {
-                       DRM_ERROR("failed to set %s head to zero "
-                                 "ctl %08x head %08x tail %08x start %08x\n",
-                                 engine->name,
-                                 ENGINE_READ(engine, RING_CTL),
-                                 ENGINE_READ(engine, RING_HEAD),
-                                 ENGINE_READ(engine, RING_TAIL),
-                                 ENGINE_READ(engine, RING_START));
                        ret = -EIO;
                        goto out;
                }
@@ -719,7 +723,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
                                    RING_CTL(engine->mmio_base),
                                    RING_VALID, RING_VALID,
                                    50)) {
-               DRM_ERROR("%s initialization failed "
+               drm_err(&dev_priv->drm, "%s initialization failed "
                          "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
                          engine->name,
                          ENGINE_READ(engine, RING_CTL),
@@ -1238,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
 
        if (engine->default_state) {
-               void *defaults, *vaddr;
+               void *vaddr;
 
                vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
                if (IS_ERR(vaddr)) {
@@ -1246,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
                        goto err_obj;
                }
 
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (IS_ERR(defaults)) {
-                       err = PTR_ERR(defaults);
-                       goto err_map;
-               }
-
-               memcpy(vaddr, defaults, engine->context_size);
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_read(engine->default_state, 0,
+                          vaddr, engine->context_size);
 
                i915_gem_object_flush_map(obj);
                i915_gem_object_unpin_map(obj);
@@ -1268,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine)
 
        return vma;
 
-err_map:
-       i915_gem_object_unpin_map(obj);
 err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
index 19542fd9e207e94325e9ea99a70972a0285d9182..2f59fc6df3c21173cb039849507dc79162ee10ea 100644 (file)
@@ -8,12 +8,15 @@
 
 #include "i915_drv.h"
 #include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
 #include "intel_gt_irq.h"
 #include "intel_gt_pm_irq.h"
 #include "intel_rps.h"
 #include "intel_sideband.h"
 #include "../../../platform/x86/intel_ips.h"
 
+#define BUSY_MAX_EI    20u /* ms */
+
 /*
  * Lock protecting IPS related data structures
  */
@@ -44,6 +47,100 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
        intel_uncore_write_fw(uncore, reg, val);
 }
 
+static void rps_timer(struct timer_list *t)
+{
+       struct intel_rps *rps = from_timer(rps, t, timer);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       s64 max_busy[3] = {};
+       ktime_t dt, last;
+
+       for_each_engine(engine, rps_to_gt(rps), id) {
+               s64 busy;
+               int i;
+
+               dt = intel_engine_get_busy_time(engine);
+               last = engine->stats.rps;
+               engine->stats.rps = dt;
+
+               busy = ktime_to_ns(ktime_sub(dt, last));
+               for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
+                       if (busy > max_busy[i])
+                               swap(busy, max_busy[i]);
+               }
+       }
+
+       dt = ktime_get();
+       last = rps->pm_timestamp;
+       rps->pm_timestamp = dt;
+
+       if (intel_rps_is_active(rps)) {
+               s64 busy;
+               int i;
+
+               dt = ktime_sub(dt, last);
+
+               /*
+                * Our goal is to evaluate each engine independently, so we run
+                * at the lowest clocks required to sustain the heaviest
+                * workload. However, a task may be split into sequential
+                * dependent operations across a set of engines, such that
+                * the independent contributions do not account for high load,
+                * but overall the task is GPU bound. For example, consider
+                * video decode on vcs followed by colour post-processing
+                * on vecs, followed by general post-processing on rcs.
+                * Since multi-engines being active does imply a single
+                * continuous workload across all engines, we hedge our
+                * bets by only contributing a factor of the distributed
+                * load into our busyness calculation.
+                */
+               busy = max_busy[0];
+               for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
+                       if (!max_busy[i])
+                               break;
+
+                       busy += div_u64(max_busy[i], 1 << i);
+               }
+               GT_TRACE(rps_to_gt(rps),
+                        "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
+                        busy, (int)div64_u64(100 * busy, dt),
+                        max_busy[0], max_busy[1], max_busy[2],
+                        rps->pm_interval);
+
+               if (100 * busy > rps->power.up_threshold * dt &&
+                   rps->cur_freq < rps->max_freq_softlimit) {
+                       rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
+                       rps->pm_interval = 1;
+                       schedule_work(&rps->work);
+               } else if (100 * busy < rps->power.down_threshold * dt &&
+                          rps->cur_freq > rps->min_freq_softlimit) {
+                       rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
+                       rps->pm_interval = 1;
+                       schedule_work(&rps->work);
+               } else {
+                       rps->last_adj = 0;
+               }
+
+               mod_timer(&rps->timer,
+                         jiffies + msecs_to_jiffies(rps->pm_interval));
+               rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
+       }
+}
+
+static void rps_start_timer(struct intel_rps *rps)
+{
+       rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+       rps->pm_interval = 1;
+       mod_timer(&rps->timer, jiffies + 1);
+}
+
+static void rps_stop_timer(struct intel_rps *rps)
+{
+       del_timer_sync(&rps->timer);
+       rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+       cancel_work_sync(&rps->work);
+}
+
 static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
 {
        u32 mask = 0;
@@ -57,7 +154,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
        if (val < rps->max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
-       mask &= READ_ONCE(rps->pm_events);
+       mask &= rps->pm_events;
 
        return rps_pm_sanitize_mask(rps, ~mask);
 }
@@ -70,18 +167,11 @@ static void rps_reset_ei(struct intel_rps *rps)
 static void rps_enable_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
-       u32 events;
 
-       rps_reset_ei(rps);
+       GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+                rps->pm_events, rps_pm_mask(rps, rps->last_freq));
 
-       if (IS_VALLEYVIEW(gt->i915))
-               /* WaGsvRC0ResidencyMethod:vlv */
-               events = GEN6_PM_RP_UP_EI_EXPIRED;
-       else
-               events = (GEN6_PM_RP_UP_THRESHOLD |
-                         GEN6_PM_RP_DOWN_THRESHOLD |
-                         GEN6_PM_RP_DOWN_TIMEOUT);
-       WRITE_ONCE(rps->pm_events, events);
+       rps_reset_ei(rps);
 
        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_enable_irq(gt, rps->pm_events);
@@ -120,8 +210,6 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
 
-       WRITE_ONCE(rps->pm_events, 0);
-
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
@@ -140,6 +228,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
        cancel_work_sync(&rps->work);
 
        rps_reset_interrupts(rps);
+       GT_TRACE(gt, "interrupts:off\n");
 }
 
 static const struct cparams {
@@ -186,14 +275,12 @@ static void gen5_rps_init(struct intel_rps *rps)
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;
-       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
-                        fmax, fmin, fstart);
+       drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
+               fmax, fmin, fstart);
 
        rps->min_freq = fmax;
+       rps->efficient_freq = fstart;
        rps->max_freq = fmin;
-
-       rps->idle_freq = rps->min_freq;
-       rps->cur_freq = rps->idle_freq;
 }
 
 static unsigned long
@@ -456,7 +543,8 @@ static bool gen5_rps_enable(struct intel_rps *rps)
 
        if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
                             MEMCTL_CMD_STS) == 0, 10))
-               DRM_ERROR("stuck trying to change perf mode\n");
+               drm_err(&uncore->i915->drm,
+                       "stuck trying to change perf mode\n");
        mdelay(1);
 
        gen5_rps_set(rps, rps->cur_freq);
@@ -533,8 +621,8 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
 
 static void rps_set_power(struct intel_rps *rps, int new_power)
 {
-       struct intel_uncore *uncore = rps_to_uncore(rps);
-       struct drm_i915_private *i915 = rps_to_i915(rps);
+       struct intel_gt *gt = rps_to_gt(rps);
+       struct intel_uncore *uncore = gt->uncore;
        u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;
 
@@ -543,55 +631,49 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
        if (new_power == rps->power.mode)
                return;
 
+       threshold_up = 95;
+       threshold_down = 85;
+
        /* Note the units here are not exactly 1us, but 1280ns. */
        switch (new_power) {
        case LOW_POWER:
-               /* Upclock if more than 95% busy over 16ms */
                ei_up = 16000;
-               threshold_up = 95;
-
-               /* Downclock if less than 85% busy over 32ms */
                ei_down = 32000;
-               threshold_down = 85;
                break;
 
        case BETWEEN:
-               /* Upclock if more than 90% busy over 13ms */
                ei_up = 13000;
-               threshold_up = 90;
-
-               /* Downclock if less than 75% busy over 32ms */
                ei_down = 32000;
-               threshold_down = 75;
                break;
 
        case HIGH_POWER:
-               /* Upclock if more than 85% busy over 10ms */
                ei_up = 10000;
-               threshold_up = 85;
-
-               /* Downclock if less than 60% busy over 32ms */
                ei_down = 32000;
-               threshold_down = 60;
                break;
        }
 
        /* When byt can survive without system hang with dynamic
         * sw freq adjustments, this restriction can be lifted.
         */
-       if (IS_VALLEYVIEW(i915))
+       if (IS_VALLEYVIEW(gt->i915))
                goto skip_hw_write;
 
-       set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+       GT_TRACE(gt,
+                "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+                new_power, threshold_up, ei_up, threshold_down, ei_down);
+
+       set(uncore, GEN6_RP_UP_EI,
+           intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
        set(uncore, GEN6_RP_UP_THRESHOLD,
-           GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+           intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
 
-       set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+       set(uncore, GEN6_RP_DOWN_EI,
+           intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
        set(uncore, GEN6_RP_DOWN_THRESHOLD,
-           GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+           intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
 
        set(uncore, GEN6_RP_CONTROL,
-           (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+           (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
            GEN6_RP_MEDIA_HW_NORMAL_MODE |
            GEN6_RP_MEDIA_IS_GFX |
            GEN6_RP_ENABLE |
@@ -646,9 +728,11 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
 
 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
 {
+       GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+
        mutex_lock(&rps->power.mutex);
        if (interactive) {
-               if (!rps->power.interactive++ && READ_ONCE(rps->active))
+               if (!rps->power.interactive++ && intel_rps_is_active(rps))
                        rps_set_power(rps, HIGH_POWER);
        } else {
                GEM_BUG_ON(!rps->power.interactive);
@@ -673,6 +757,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
                         GEN6_AGGRESSIVE_TURBO);
        set(uncore, GEN6_RPNSWREQ, swreq);
 
+       GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+                val, intel_gpu_freq(rps, val), swreq);
+
        return 0;
 }
 
@@ -685,6 +772,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
        err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
        vlv_punit_put(i915);
 
+       GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+                val, intel_gpu_freq(rps, val));
+
        return err;
 }
 
@@ -715,29 +805,30 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
 
 void intel_rps_unpark(struct intel_rps *rps)
 {
-       u8 freq;
-
-       if (!rps->enabled)
+       if (!intel_rps_is_enabled(rps))
                return;
 
+       GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
        /*
         * Use the user's desired frequency as a guide, but for better
         * performance, jump directly to RPe as our starting frequency.
         */
        mutex_lock(&rps->lock);
 
-       WRITE_ONCE(rps->active, true);
-
-       freq = max(rps->cur_freq, rps->efficient_freq),
-       freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
-       intel_rps_set(rps, freq);
-
-       rps->last_adj = 0;
+       intel_rps_set_active(rps);
+       intel_rps_set(rps,
+                     clamp(rps->cur_freq,
+                           rps->min_freq_softlimit,
+                           rps->max_freq_softlimit));
 
        mutex_unlock(&rps->lock);
 
-       if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+       rps->pm_iir = 0;
+       if (intel_rps_has_interrupts(rps))
                rps_enable_interrupts(rps);
+       if (intel_rps_uses_timer(rps))
+               rps_start_timer(rps);
 
        if (IS_GEN(rps_to_i915(rps), 5))
                gen5_rps_update(rps);
@@ -745,15 +836,16 @@ void intel_rps_unpark(struct intel_rps *rps)
 
 void intel_rps_park(struct intel_rps *rps)
 {
-       struct drm_i915_private *i915 = rps_to_i915(rps);
+       int adj;
 
-       if (!rps->enabled)
+       if (!intel_rps_clear_active(rps))
                return;
 
-       if (INTEL_GEN(i915) >= 6)
+       if (intel_rps_uses_timer(rps))
+               rps_stop_timer(rps);
+       if (intel_rps_has_interrupts(rps))
                rps_disable_interrupts(rps);
 
-       WRITE_ONCE(rps->active, false);
        if (rps->last_freq <= rps->idle_freq)
                return;
 
@@ -784,8 +876,15 @@ void intel_rps_park(struct intel_rps *rps)
         * (Note we accommodate Cherryview's limitation of only using an
         * even bin by applying it to all.)
         */
-       rps->cur_freq =
-               max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+       adj = rps->last_adj;
+       if (adj < 0)
+               adj *= 2;
+       else /* CHV needs even encode values */
+               adj = -2;
+       rps->last_adj = adj;
+       rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+
+       GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
 
 void intel_rps_boost(struct i915_request *rq)
@@ -793,7 +892,7 @@ void intel_rps_boost(struct i915_request *rq)
        struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
        unsigned long flags;
 
-       if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
+       if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
                return;
 
        /* Serializes with i915_request_retire() */
@@ -802,6 +901,9 @@ void intel_rps_boost(struct i915_request *rq)
            !dma_fence_is_signaled_locked(&rq->fence)) {
                set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
+               GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+                        rq->fence.context, rq->fence.seqno);
+
                if (!atomic_fetch_inc(&rps->num_waiters) &&
                    READ_ONCE(rps->cur_freq) < rps->boost_freq)
                        schedule_work(&rps->work);
@@ -819,7 +921,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
        GEM_BUG_ON(val > rps->max_freq);
        GEM_BUG_ON(val < rps->min_freq);
 
-       if (rps->active) {
+       if (intel_rps_is_active(rps)) {
                err = rps_set(rps, val, true);
                if (err)
                        return err;
@@ -828,7 +930,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
                 * Make sure we continue to get interrupts
                 * until we hit the minimum or maximum frequencies.
                 */
-               if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+               if (intel_rps_has_interrupts(rps)) {
                        struct intel_uncore *uncore = rps_to_uncore(rps);
 
                        set(uncore,
@@ -896,12 +998,14 @@ static void gen6_rps_init(struct intel_rps *rps)
 
 static bool rps_reset(struct intel_rps *rps)
 {
+       struct drm_i915_private *i915 = rps_to_i915(rps);
+
        /* force a reset */
        rps->power.mode = -1;
        rps->last_freq = -1;
 
        if (rps_set(rps, rps->min_freq, true)) {
-               DRM_ERROR("Failed to reset RPS to initial values\n");
+               drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
                return false;
        }
 
@@ -912,20 +1016,18 @@ static bool rps_reset(struct intel_rps *rps)
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
 static bool gen9_rps_enable(struct intel_rps *rps)
 {
-       struct drm_i915_private *i915 = rps_to_i915(rps);
-       struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_gt *gt = rps_to_gt(rps);
+       struct intel_uncore *uncore = gt->uncore;
 
        /* Program defaults and thresholds for RPS */
-       if (IS_GEN(i915, 9))
+       if (IS_GEN(gt->i915, 9))
                intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                                      GEN9_FREQUENCY(rps->rp1_freq));
 
-       /* 1 second timeout */
-       intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
-                             GT_INTERVAL_FROM_US(i915, 1000000));
-
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
 
+       rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
        return rps_reset(rps);
 }
 
@@ -936,12 +1038,10 @@ static bool gen8_rps_enable(struct intel_rps *rps)
        intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                              HSW_FREQUENCY(rps->rp1_freq));
 
-       /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
-       intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
-                             100000000 / 128); /* 1 second timeout */
-
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
 
+       rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
        return rps_reset(rps);
 }
 
@@ -953,6 +1053,10 @@ static bool gen6_rps_enable(struct intel_rps *rps)
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
 
+       rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+                         GEN6_PM_RP_DOWN_THRESHOLD |
+                         GEN6_PM_RP_DOWN_TIMEOUT);
+
        return rps_reset(rps);
 }
 
@@ -1038,6 +1142,10 @@ static bool chv_rps_enable(struct intel_rps *rps)
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_AVG);
 
+       rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+                         GEN6_PM_RP_DOWN_THRESHOLD |
+                         GEN6_PM_RP_DOWN_TIMEOUT);
+
        /* Setting Fixed Bias */
        vlv_punit_get(i915);
 
@@ -1052,8 +1160,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
        drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
                      "GPLL not enabled\n");
 
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+       drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+       drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
 
        return rps_reset(rps);
 }
@@ -1136,6 +1244,9 @@ static bool vlv_rps_enable(struct intel_rps *rps)
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_CONT);
 
+       /* WaGsvRC0ResidencyMethod:vlv */
+       rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+
        vlv_punit_get(i915);
 
        /* Setting Fixed Bias */
@@ -1150,8 +1261,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
        drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
                      "GPLL not enabled\n");
 
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+       drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+       drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
 
        return rps_reset(rps);
 }
@@ -1194,33 +1305,71 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
        return ips->gfx_power + state2;
 }
 
+static bool has_busy_stats(struct intel_rps *rps)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, rps_to_gt(rps), id) {
+               if (!intel_engine_supports_stats(engine))
+                       return false;
+       }
+
+       return true;
+}
+
 void intel_rps_enable(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       bool enabled = false;
+
+       if (!HAS_RPS(i915))
+               return;
+
+       intel_gt_check_clock_frequency(rps_to_gt(rps));
 
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-       if (IS_CHERRYVIEW(i915))
-               rps->enabled = chv_rps_enable(rps);
+       if (rps->max_freq <= rps->min_freq)
+               /* leave disabled, no room for dynamic reclocking */;
+       else if (IS_CHERRYVIEW(i915))
+               enabled = chv_rps_enable(rps);
        else if (IS_VALLEYVIEW(i915))
-               rps->enabled = vlv_rps_enable(rps);
+               enabled = vlv_rps_enable(rps);
        else if (INTEL_GEN(i915) >= 9)
-               rps->enabled = gen9_rps_enable(rps);
+               enabled = gen9_rps_enable(rps);
        else if (INTEL_GEN(i915) >= 8)
-               rps->enabled = gen8_rps_enable(rps);
+               enabled = gen8_rps_enable(rps);
        else if (INTEL_GEN(i915) >= 6)
-               rps->enabled = gen6_rps_enable(rps);
+               enabled = gen6_rps_enable(rps);
        else if (IS_IRONLAKE_M(i915))
-               rps->enabled = gen5_rps_enable(rps);
+               enabled = gen5_rps_enable(rps);
+       else
+               MISSING_CASE(INTEL_GEN(i915));
        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-       if (!rps->enabled)
+       if (!enabled)
                return;
 
-       drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
-       drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
+       GT_TRACE(rps_to_gt(rps),
+                "min:%x, max:%x, freq:[%d, %d]\n",
+                rps->min_freq, rps->max_freq,
+                intel_gpu_freq(rps, rps->min_freq),
+                intel_gpu_freq(rps, rps->max_freq));
 
-       drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
-       drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
+       GEM_BUG_ON(rps->max_freq < rps->min_freq);
+       GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+       GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+       GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
+
+       if (has_busy_stats(rps))
+               intel_rps_set_timer(rps);
+       else if (INTEL_GEN(i915) >= 6)
+               intel_rps_set_interrupts(rps);
+       else
+               /* Ironlake currently uses intel_ips.ko */ {}
+
+       intel_rps_set_enabled(rps);
 }
 
 static void gen6_rps_disable(struct intel_rps *rps)
@@ -1232,7 +1381,9 @@ void intel_rps_disable(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
 
-       rps->enabled = false;
+       intel_rps_clear_enabled(rps);
+       intel_rps_clear_interrupts(rps);
+       intel_rps_clear_timer(rps);
 
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_disable(rps);
@@ -1308,7 +1459,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
                                  CCK_GPLL_CLOCK_CONTROL,
                                  i915->czclk_freq);
 
-       DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+       drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+               rps->gpll_ref_freq);
 }
 
 static void vlv_rps_init(struct intel_rps *rps)
@@ -1336,28 +1488,24 @@ static void vlv_rps_init(struct intel_rps *rps)
                i915->mem_freq = 1333;
                break;
        }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+       drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
 
        rps->max_freq = vlv_rps_max_freq(rps);
        rps->rp0_freq = rps->max_freq;
-       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->max_freq),
-                        rps->max_freq);
+       drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
 
        rps->efficient_freq = vlv_rps_rpe_freq(rps);
-       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->efficient_freq),
-                        rps->efficient_freq);
+       drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
 
        rps->rp1_freq = vlv_rps_guar_freq(rps);
-       DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->rp1_freq),
-                        rps->rp1_freq);
+       drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
 
        rps->min_freq = vlv_rps_min_freq(rps);
-       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->min_freq),
-                        rps->min_freq);
+       drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
 
        vlv_iosf_sb_put(i915,
                        BIT(VLV_IOSF_SB_PUNIT) |
@@ -1387,28 +1535,24 @@ static void chv_rps_init(struct intel_rps *rps)
                i915->mem_freq = 1600;
                break;
        }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+       drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
 
        rps->max_freq = chv_rps_max_freq(rps);
        rps->rp0_freq = rps->max_freq;
-       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->max_freq),
-                        rps->max_freq);
+       drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
 
        rps->efficient_freq = chv_rps_rpe_freq(rps);
-       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->efficient_freq),
-                        rps->efficient_freq);
+       drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
 
        rps->rp1_freq = chv_rps_guar_freq(rps);
-       DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->rp1_freq),
-                        rps->rp1_freq);
+       drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
 
        rps->min_freq = chv_rps_min_freq(rps);
-       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(rps, rps->min_freq),
-                        rps->min_freq);
+       drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+               intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
 
        vlv_iosf_sb_put(i915,
                        BIT(VLV_IOSF_SB_PUNIT) |
@@ -1471,12 +1615,13 @@ static void rps_work(struct work_struct *work)
 {
        struct intel_rps *rps = container_of(work, typeof(*rps), work);
        struct intel_gt *gt = rps_to_gt(rps);
+       struct drm_i915_private *i915 = rps_to_i915(rps);
        bool client_boost = false;
        int new_freq, adj, min, max;
        u32 pm_iir = 0;
 
        spin_lock_irq(&gt->irq_lock);
-       pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
+       pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
        client_boost = atomic_read(&rps->num_waiters);
        spin_unlock_irq(&gt->irq_lock);
 
@@ -1485,6 +1630,10 @@ static void rps_work(struct work_struct *work)
                goto out;
 
        mutex_lock(&rps->lock);
+       if (!intel_rps_is_active(rps)) {
+               mutex_unlock(&rps->lock);
+               return;
+       }
 
        pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
 
@@ -1494,6 +1643,12 @@ static void rps_work(struct work_struct *work)
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
+
+       GT_TRACE(gt,
+                "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+                pm_iir, yesno(client_boost),
+                adj, new_freq, min, max);
+
        if (client_boost && new_freq < rps->boost_freq) {
                new_freq = rps->boost_freq;
                adj = 0;
@@ -1525,30 +1680,18 @@ static void rps_work(struct work_struct *work)
                adj = 0;
        }
 
-       rps->last_adj = adj;
-
        /*
-        * Limit deboosting and boosting to keep ourselves at the extremes
-        * when in the respective power modes (i.e. slowly decrease frequencies
-        * while in the HIGH_POWER zone and slowly increase frequencies while
-        * in the LOW_POWER zone). On idle, we will hit the timeout and drop
-        * to the next level quickly, and conversely if busy we expect to
-        * hit a waitboost and rapidly switch into max power.
-        */
-       if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
-           (adj > 0 && rps->power.mode == LOW_POWER))
-               rps->last_adj = 0;
-
-       /* sysfs frequency interfaces may have snuck in while servicing the
-        * interrupt
+        * sysfs frequency limits may have snuck in while
+        * servicing the interrupt
         */
        new_freq += adj;
        new_freq = clamp_t(int, new_freq, min, max);
 
        if (intel_rps_set(rps, new_freq)) {
-               DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-               rps->last_adj = 0;
+               drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
+               adj = 0;
        }
+       rps->last_adj = adj;
 
        mutex_unlock(&rps->lock);
 
@@ -1568,6 +1711,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        if (unlikely(!events))
                return;
 
+       GT_TRACE(gt, "irq events:%x\n", events);
+
        gen6_gt_pm_mask_irq(gt, events);
 
        rps->pm_iir |= events;
@@ -1579,10 +1724,12 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        struct intel_gt *gt = rps_to_gt(rps);
        u32 events;
 
-       events = pm_iir & READ_ONCE(rps->pm_events);
+       events = pm_iir & rps->pm_events;
        if (events) {
                spin_lock(&gt->irq_lock);
 
+               GT_TRACE(gt, "irq events:%x\n", events);
+
                gen6_gt_pm_mask_irq(gt, events);
                rps->pm_iir |= events;
 
@@ -1640,6 +1787,7 @@ void intel_rps_init_early(struct intel_rps *rps)
        mutex_init(&rps->power.mutex);
 
        INIT_WORK(&rps->work, rps_work);
+       timer_setup(&rps->timer, rps_timer, 0);
 
        atomic_set(&rps->num_waiters, 0);
 }
@@ -1668,9 +1816,10 @@ void intel_rps_init(struct intel_rps *rps)
                sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
                                       &params, NULL);
                if (params & BIT(31)) { /* OC supported */
-                       DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
-                                        (rps->max_freq & 0xff) * 50,
-                                        (params & 0xff) * 50);
+                       drm_dbg(&i915->drm,
+                               "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+                               (rps->max_freq & 0xff) * 50,
+                               (params & 0xff) * 50);
                        rps->max_freq = params & 0xff;
                }
        }
@@ -1678,7 +1827,9 @@ void intel_rps_init(struct intel_rps *rps)
        /* Finally allow us to boost to max by default */
        rps->boost_freq = rps->max_freq;
        rps->idle_freq = rps->min_freq;
-       rps->cur_freq = rps->idle_freq;
+
+       /* Start in the middle, from here we will autotune based on workload */
+       rps->cur_freq = rps->efficient_freq;
 
        rps->pm_intrmsk_mbz = 0;
 
@@ -1695,6 +1846,12 @@ void intel_rps_init(struct intel_rps *rps)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
+void intel_rps_sanitize(struct intel_rps *rps)
+{
+       if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+               rps_disable_interrupts(rps);
+}
+
 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -1722,7 +1879,7 @@ static u32 read_cagf(struct intel_rps *rps)
                freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
                vlv_punit_put(i915);
        } else {
-               freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+               freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
        }
 
        return intel_rps_get_cagf(rps, freq);
@@ -1730,7 +1887,7 @@ static u32 read_cagf(struct intel_rps *rps)
 
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 {
-       struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
        intel_wakeref_t wakeref;
        u32 freq = 0;
 
@@ -1930,3 +2087,7 @@ bool i915_gpu_turbo_disable(void)
        return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#endif
index dfa98194f3b25ed30f00a8e5b7fef59fd2d63efd..8d3c9d6636623a2aa38df6fc37a3b6b8be24b190 100644 (file)
@@ -13,6 +13,7 @@ struct i915_request;
 
 void intel_rps_init_early(struct intel_rps *rps);
 void intel_rps_init(struct intel_rps *rps);
+void intel_rps_sanitize(struct intel_rps *rps);
 
 void intel_rps_driver_register(struct intel_rps *rps);
 void intel_rps_driver_unregister(struct intel_rps *rps);
@@ -36,4 +37,64 @@ void gen5_rps_irq_handler(struct intel_rps *rps);
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
 
+static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
+{
+       return test_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_set_enabled(struct intel_rps *rps)
+{
+       set_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_clear_enabled(struct intel_rps *rps)
+{
+       clear_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline bool intel_rps_is_active(const struct intel_rps *rps)
+{
+       return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline void intel_rps_set_active(struct intel_rps *rps)
+{
+       set_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_clear_active(struct intel_rps *rps)
+{
+       return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_has_interrupts(const struct intel_rps *rps)
+{
+       return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_set_interrupts(struct intel_rps *rps)
+{
+       set_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_clear_interrupts(struct intel_rps *rps)
+{
+       clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline bool intel_rps_uses_timer(const struct intel_rps *rps)
+{
+       return test_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_set_timer(struct intel_rps *rps)
+{
+       set_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_clear_timer(struct intel_rps *rps)
+{
+       clear_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
 #endif /* INTEL_RPS_H */
index c2e279154bd5647bc98865c0e11007da6682b1ee..38083f0402d9cc7f0e4fda9f35ac059b7345aef5 100644 (file)
@@ -31,6 +31,13 @@ struct intel_rps_ei {
        u32 media_c0;
 };
 
+enum {
+       INTEL_RPS_ENABLED = 0,
+       INTEL_RPS_ACTIVE,
+       INTEL_RPS_INTERRUPTS,
+       INTEL_RPS_TIMER,
+};
+
 struct intel_rps {
        struct mutex lock; /* protects enabling and the worker */
 
@@ -38,9 +45,12 @@ struct intel_rps {
         * work, interrupts_enabled and pm_iir are protected by
         * dev_priv->irq_lock
         */
+       struct timer_list timer;
        struct work_struct work;
-       bool enabled;
-       bool active;
+       unsigned long flags;
+
+       ktime_t pm_timestamp;
+       u32 pm_interval;
        u32 pm_iir;
 
        /* PM interrupt bits that should never be masked */
index 74f793423231bcbfb8c7e578b229bfa4be55395a..d173271c73976aca878686c7dffbabe331c79c43 100644 (file)
@@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 {
        const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
        bool subslice_pg = sseu->has_subslice_pg;
-       struct intel_sseu ctx_sseu;
        u8 slices, subslices;
        u32 rpcs = 0;
 
@@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 
        /*
         * If i915/perf is active, we want a stable powergating configuration
-        * on the system.
-        *
-        * We could choose full enablement, but on ICL we know there are use
-        * cases which disable slices for functional, apart for performance
-        * reasons. So in this case we select a known stable subset.
+        * on the system. Use the configuration pinned by i915/perf.
         */
-       if (!i915->perf.exclusive_stream) {
-               ctx_sseu = *req_sseu;
-       } else {
-               ctx_sseu = intel_sseu_from_device_info(sseu);
-
-               if (IS_GEN(i915, 11)) {
-                       /*
-                        * We only need subslice count so it doesn't matter
-                        * which ones we select - just turn off low bits in the
-                        * amount of half of all available subslices per slice.
-                        */
-                       ctx_sseu.subslice_mask =
-                               ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
-                       ctx_sseu.slice_mask = 0x1;
-               }
-       }
+       if (i915->perf.exclusive_stream)
+               req_sseu = &i915->perf.sseu;
 
-       slices = hweight8(ctx_sseu.slice_mask);
-       subslices = hweight8(ctx_sseu.subslice_mask);
+       slices = hweight8(req_sseu->slice_mask);
+       subslices = hweight8(req_sseu->subslice_mask);
 
        /*
         * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
@@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
        if (sseu->has_eu_pg) {
                u32 val;
 
-               val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+               val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
                val &= GEN8_RPCS_EU_MIN_MASK;
 
                rpcs |= val;
 
-               val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+               val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
                val &= GEN8_RPCS_EU_MAX_MASK;
 
index 08b56d7ab4f45054754373b5b47a541755f4c01b..4546284fede1fa9800b9f15a364e039320f5b844 100644 (file)
@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
        spin_unlock_irqrestore(&gt->hwsp_lock, flags);
 }
 
+static void __rcu_cacheline_free(struct rcu_head *rcu)
+{
+       struct intel_timeline_cacheline *cl =
+               container_of(rcu, typeof(*cl), rcu);
+
+       i915_active_fini(&cl->active);
+       kfree(cl);
+}
+
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
        GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
        i915_vma_put(cl->hwsp->vma);
        __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
 
-       i915_active_fini(&cl->active);
-       kfree_rcu(cl, rcu);
+       call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
 __i915_active_call
@@ -203,9 +211,9 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
        i915_active_release(&cl->active);
 }
 
-int intel_timeline_init(struct intel_timeline *timeline,
-                       struct intel_gt *gt,
-                       struct i915_vma *hwsp)
+static int intel_timeline_init(struct intel_timeline *timeline,
+                              struct intel_gt *gt,
+                              struct i915_vma *hwsp)
 {
        void *vaddr;
 
@@ -272,7 +280,7 @@ void intel_gt_init_timelines(struct intel_gt *gt)
        INIT_LIST_HEAD(&timelines->hwsp_free_list);
 }
 
-void intel_timeline_fini(struct intel_timeline *timeline)
+static void intel_timeline_fini(struct intel_timeline *timeline)
 {
        GEM_BUG_ON(atomic_read(&timeline->pin_count));
        GEM_BUG_ON(!list_empty(&timeline->requests));
@@ -329,6 +337,13 @@ int intel_timeline_pin(struct intel_timeline *tl)
        return 0;
 }
 
+void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+{
+       /* Must be pinned to be writable, and no requests in flight. */
+       GEM_BUG_ON(!atomic_read(&tl->pin_count));
+       WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+}
+
 void intel_timeline_enter(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
@@ -357,8 +372,16 @@ void intel_timeline_enter(struct intel_timeline *tl)
                return;
 
        spin_lock(&timelines->lock);
-       if (!atomic_fetch_inc(&tl->active_count))
+       if (!atomic_fetch_inc(&tl->active_count)) {
+               /*
+                * The HWSP is volatile, and may have been lost while inactive,
+                * e.g. across suspend/resume. Be paranoid, and ensure that
+                * the HWSP value matches our seqno so we don't proclaim
+                * the next request as already complete.
+                */
+               intel_timeline_reset_seqno(tl);
                list_add_tail(&tl->link, &timelines->active_list);
+       }
        spin_unlock(&timelines->lock);
 }
 
index f5b7eade3809b41b220d9bfea5ac30cd556c38a9..4298b9ac7327eff83a7569a9559239753d0daff2 100644 (file)
 #include "i915_syncmap.h"
 #include "gt/intel_timeline_types.h"
 
-int intel_timeline_init(struct intel_timeline *tl,
-                       struct intel_gt *gt,
-                       struct i915_vma *hwsp);
-void intel_timeline_fini(struct intel_timeline *tl);
-
 struct intel_timeline *
 intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
 
@@ -84,6 +79,8 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
 void intel_timeline_exit(struct intel_timeline *tl);
 void intel_timeline_unpin(struct intel_timeline *tl);
 
+void intel_timeline_reset_seqno(const struct intel_timeline *tl);
+
 int intel_timeline_read_hwsp(struct i915_request *from,
                             struct i915_request *until,
                             u32 *hwsp_offset);
index 5176ad1a3976ba393471d343b3c11df00094d001..90a2b9e399b094b50300f5c8e753ee2ad4bf94cc 100644 (file)
@@ -485,25 +485,14 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
 {
-       struct drm_i915_private *i915 = engine->i915;
-
        /* WaForceContextSaveRestoreNonCoherent:cnl */
        WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
 
-       /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
-       if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
-               WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
        /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
-       /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
-       if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
-               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-                                 GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
        /* WaPushConstantDereferenceHoldDisable:cnl */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
 
@@ -837,7 +826,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
                        intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
                        GEN10_L3BANK_MASK;
 
-               DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+               drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
                l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
        } else {
                l3_en = ~0;
@@ -846,7 +835,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
        slice = fls(sseu->slice_mask) - 1;
        subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
        if (!subslice) {
-               DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+               drm_warn(&i915->drm,
+                        "No common index found between subslice mask %x and L3 bank mask %x!\n",
                         intel_sseu_get_subslices(sseu, slice), l3_en);
                subslice = fls(l3_en);
                drm_WARN_ON(&i915->drm, !subslice);
@@ -861,7 +851,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
                mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
        }
 
-       DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+       drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
 
        wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
 }
@@ -871,12 +861,6 @@ cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
        wa_init_mcr(i915, wal);
 
-       /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-       if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
-               wa_write_or(wal,
-                           GAMT_CHKN_BIT_REG,
-                           GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
-
        /* WaInPlaceDecompressionHang:cnl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
@@ -933,15 +917,20 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
-       /* Wa_1607087056:icl */
-       wa_write_or(wal,
-                   SLICE_UNIT_LEVEL_CLKGATE,
-                   L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+       /* Wa_1607087056:icl,ehl,jsl */
+       if (IS_ICELAKE(i915) ||
+           IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
+               wa_write_or(wal,
+                           SLICE_UNIT_LEVEL_CLKGATE,
+                           L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+       }
 }
 
 static void
 tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
+       wa_init_mcr(i915, wal);
+
        /* Wa_1409420604:tgl */
        if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
                wa_write_or(wal,
@@ -1379,12 +1368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                            GEN7_FF_THREAD_MODE,
                            GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
 
-               /*
-                * Wa_1409085225:tgl
-                * Wa_14010229206:tgl
-                */
-               wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
-
                /* Wa_1408615072:tgl */
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                            VSUNIT_CLKGATE_DIS_TGL);
@@ -1402,6 +1385,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                wa_masked_en(wal,
                             GEN9_CS_DEBUG_MODE1,
                             FF_DOP_CLOCK_GATE_DISABLE);
+
+               /*
+                * Wa_1409085225:tgl
+                * Wa_14010229206:tgl
+                */
+               wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
        }
 
        if (IS_GEN(i915, 11)) {
index 4a53ded7c2dd6a5ad80413c5c1358fbc6b3955af..b8dd3cbc8696bdb1f202efa7e3e2547828341b83 100644 (file)
@@ -28,7 +28,6 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
@@ -328,7 +327,6 @@ int mock_engine_init(struct intel_engine_cs *engine)
        intel_engine_init_execlists(engine);
        intel_engine_init__pm(engine);
        intel_engine_init_retire(engine);
-       intel_engine_pool_init(&engine->pool);
 
        ce = create_kernel_context(engine);
        if (IS_ERR(ce))
index e874dfaa531685a3f84ccb8e0e760ea176174be3..52af1cee9a94eb4845766538d2598537d9dd7120 100644 (file)
@@ -24,6 +24,7 @@ static int request_sync(struct i915_request *rq)
 
        /* Opencode i915_request_add() so we can keep the timeline locked. */
        __i915_request_commit(rq);
+       rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        __i915_request_queue(rq, NULL);
 
        timeout = i915_request_wait(rq, 0, HZ / 10);
@@ -154,10 +155,7 @@ static int live_context_size(void *arg)
         */
 
        for_each_engine(engine, gt, id) {
-               struct {
-                       struct drm_i915_gem_object *state;
-                       void *pinned;
-               } saved;
+               struct file *saved;
 
                if (!engine->context_size)
                        continue;
@@ -171,8 +169,7 @@ static int live_context_size(void *arg)
                 * active state is sufficient, we are only checking that we
                 * don't use more than we planned.
                 */
-               saved.state = fetch_and_zero(&engine->default_state);
-               saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+               saved = fetch_and_zero(&engine->default_state);
 
                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;
@@ -181,8 +178,7 @@ static int live_context_size(void *arg)
 
                engine->context_size -= I915_GTT_PAGE_SIZE;
 
-               engine->pinned_default_state = saved.pinned;
-               engine->default_state = saved.state;
+               engine->default_state = saved;
 
                intel_engine_pm_put(engine);
 
index 09ff8e4f88af8d76382213a0ea0283b38f62ab10..242181a5214c42580fb0613f88d0eeed506f06d1 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "selftest_llc.h"
 #include "selftest_rc6.h"
+#include "selftest_rps.h"
 
 static int live_gt_resume(void *arg)
 {
@@ -52,6 +53,13 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(live_rc6_manual),
+               SUBTEST(live_rps_clock_interval),
+               SUBTEST(live_rps_control),
+               SUBTEST(live_rps_frequency_cs),
+               SUBTEST(live_rps_frequency_srm),
+               SUBTEST(live_rps_power),
+               SUBTEST(live_rps_interrupt),
+               SUBTEST(live_rps_dynamic),
                SUBTEST(live_gt_resume),
        };
 
index f95ae15ce865c0fbad7fd7f4a85e655ba9b0c17e..824f99c4cc7cda379731266623590639b4e4d4d5 100644 (file)
@@ -21,7 +21,8 @@
 #include "gem/selftests/mock_context.h"
 
 #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
-#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
 
 static struct i915_vma *create_scratch(struct intel_gt *gt)
 {
@@ -68,26 +69,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
        engine->props.heartbeat_interval_ms = saved;
 }
 
+static bool is_active(struct i915_request *rq)
+{
+       if (i915_request_is_active(rq))
+               return true;
+
+       if (i915_request_on_hold(rq))
+               return true;
+
+       if (i915_request_started(rq))
+               return true;
+
+       return false;
+}
+
 static int wait_for_submit(struct intel_engine_cs *engine,
                           struct i915_request *rq,
                           unsigned long timeout)
 {
        timeout += jiffies;
        do {
-               cond_resched();
-               intel_engine_flush_submission(engine);
+               bool done = time_after(jiffies, timeout);
 
-               if (READ_ONCE(engine->execlists.pending[0]))
-                       continue;
-
-               if (i915_request_is_active(rq))
+               if (i915_request_completed(rq)) /* that was quick! */
                        return 0;
 
-               if (i915_request_started(rq)) /* that was quick! */
+               /* Wait until the HW has acknowledged the submission (or err) */
+               intel_engine_flush_submission(engine);
+               if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
                        return 0;
-       } while (time_before(jiffies, timeout));
 
-       return -ETIME;
+               if (done)
+                       return -ETIME;
+
+               cond_resched();
+       } while (1);
 }
 
 static int wait_for_reset(struct intel_engine_cs *engine,
@@ -634,9 +650,9 @@ static int live_error_interrupt(void *arg)
                                                 error_repr(p->error[i]));
 
                                if (!i915_request_started(client[i])) {
-                                       pr_debug("%s: %s request not stated!\n",
-                                                engine->name,
-                                                error_repr(p->error[i]));
+                                       pr_err("%s: %s request not started!\n",
+                                              engine->name,
+                                              error_repr(p->error[i]));
                                        err = -ETIME;
                                        goto out;
                                }
@@ -644,9 +660,10 @@ static int live_error_interrupt(void *arg)
                                /* Kick the tasklet to process the error */
                                intel_engine_flush_submission(engine);
                                if (client[i]->fence.error != p->error[i]) {
-                                       pr_err("%s: %s request completed with wrong error code: %d\n",
+                                       pr_err("%s: %s request (%s) with wrong error code: %d\n",
                                               engine->name,
                                               error_repr(p->error[i]),
+                                              i915_request_completed(client[i]) ? "completed" : "running",
                                               client[i]->fence.error);
                                        err = -EINVAL;
                                        goto out;
@@ -1057,7 +1074,6 @@ static int live_timeslice_rewind(void *arg)
                               engine->name);
                        goto err;
                }
-               GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
 
                /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
                if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
@@ -1230,8 +1246,14 @@ static int live_timeslice_queue(void *arg)
                if (err)
                        goto err_rq;
 
-               intel_engine_flush_submission(engine);
+               /* Wait until we ack the release_queue and start timeslicing */
+               do {
+                       cond_resched();
+                       intel_engine_flush_submission(engine);
+               } while (READ_ONCE(engine->execlists.pending[0]));
+
                if (!READ_ONCE(engine->execlists.timer.expires) &&
+                   execlists_active(&engine->execlists) == rq &&
                    !i915_request_completed(rq)) {
                        struct drm_printer p =
                                drm_info_printer(gt->i915->drm.dev);
@@ -2032,6 +2054,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
        if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
                return 0;
 
+       if (!intel_has_reset_engine(arg->engine->gt))
+               return 0;
+
        GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
        rq = spinner_create_request(&arg->a.spin,
                                    arg->a.ctx, arg->engine,
@@ -2632,7 +2657,7 @@ static int create_gang(struct intel_engine_cs *engine,
        if (IS_ERR(rq))
                goto err_obj;
 
-       rq->batch = vma;
+       rq->batch = i915_vma_get(vma);
        i915_request_get(rq);
 
        i915_vma_lock(vma);
@@ -2656,6 +2681,7 @@ static int create_gang(struct intel_engine_cs *engine,
        return 0;
 
 err_rq:
+       i915_vma_put(rq->batch);
        i915_request_put(rq);
 err_obj:
        i915_gem_object_put(obj);
@@ -2752,6 +2778,7 @@ static int live_preempt_gang(void *arg)
                                err = -ETIME;
                        }
 
+                       i915_vma_put(rq->batch);
                        i915_request_put(rq);
                        rq = n;
                }
@@ -2765,6 +2792,331 @@ static int live_preempt_gang(void *arg)
        return 0;
 }
 
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+               struct i915_vma *result,
+               unsigned int offset)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       u32 *cs;
+       int err;
+       int i;
+
+       obj = i915_gem_object_create_internal(engine->i915, 4096);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       vma = i915_vma_instance(obj, result->vm, NULL);
+       if (IS_ERR(vma)) {
+               i915_gem_object_put(obj);
+               return vma;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err) {
+               i915_vma_put(vma);
+               return ERR_PTR(err);
+       }
+
+       cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+       if (IS_ERR(cs)) {
+               i915_vma_put(vma);
+               return ERR_CAST(cs);
+       }
+
+       /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = CS_GPR(engine, 0);
+       *cs++ = 1;
+
+       for (i = 1; i < NUM_GPR; i++) {
+               u64 addr;
+
+               /*
+                * Perform: GPR[i]++
+                *
+                * As we read and write into the context saved GPR[i], if
+                * we restart this batch buffer from an earlier point, we
+                * will repeat the increment and store a value > 1.
+                */
+               *cs++ = MI_MATH(4);
+               *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+               *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+               *cs++ = MI_MATH_ADD;
+               *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+               addr = result->node.start + offset + i * sizeof(*cs);
+               *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+               *cs++ = CS_GPR(engine, 2 * i);
+               *cs++ = lower_32_bits(addr);
+               *cs++ = upper_32_bits(addr);
+
+               *cs++ = MI_SEMAPHORE_WAIT |
+                       MI_SEMAPHORE_POLL |
+                       MI_SEMAPHORE_SAD_GTE_SDD;
+               *cs++ = i;
+               *cs++ = lower_32_bits(result->node.start);
+               *cs++ = upper_32_bits(result->node.start);
+       }
+
+       *cs++ = MI_BATCH_BUFFER_END;
+       i915_gem_object_flush_map(obj);
+       i915_gem_object_unpin_map(obj);
+
+       return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       obj = i915_gem_object_create_internal(gt->i915, sz);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+       if (IS_ERR(vma)) {
+               i915_gem_object_put(obj);
+               return vma;
+       }
+
+       err = i915_ggtt_pin(vma, 0, 0);
+       if (err) {
+               i915_vma_put(vma);
+               return ERR_PTR(err);
+       }
+
+       return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+                 struct i915_vma *global,
+                 unsigned int offset)
+{
+       struct i915_vma *batch, *vma;
+       struct intel_context *ce;
+       struct i915_request *rq;
+       int err;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return ERR_CAST(ce);
+
+       vma = i915_vma_instance(global->obj, ce->vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto out_ce;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err)
+               goto out_ce;
+
+       batch = create_gpr_user(engine, vma, offset);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto out_vma;
+       }
+
+       rq = intel_context_create_request(ce);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto out_batch;
+       }
+
+       i915_vma_lock(vma);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (!err)
+               err = i915_vma_move_to_active(vma, rq, 0);
+       i915_vma_unlock(vma);
+
+       i915_vma_lock(batch);
+       if (!err)
+               err = i915_request_await_object(rq, batch->obj, false);
+       if (!err)
+               err = i915_vma_move_to_active(batch, rq, 0);
+       if (!err)
+               err = rq->engine->emit_bb_start(rq,
+                                               batch->node.start,
+                                               PAGE_SIZE, 0);
+       i915_vma_unlock(batch);
+       i915_vma_unpin(batch);
+
+       if (!err)
+               i915_request_get(rq);
+       i915_request_add(rq);
+
+out_batch:
+       i915_vma_put(batch);
+out_vma:
+       i915_vma_unpin(vma);
+out_ce:
+       intel_context_put(ce);
+       return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+                       struct i915_vma *global,
+                       int id)
+{
+       struct i915_sched_attr attr = {
+               .priority = I915_PRIORITY_MAX
+       };
+       struct i915_request *rq;
+       int err = 0;
+       u32 *cs;
+
+       rq = intel_engine_create_kernel_request(engine);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs)) {
+               i915_request_add(rq);
+               return PTR_ERR(cs);
+       }
+
+       *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+       *cs++ = i915_ggtt_offset(global);
+       *cs++ = 0;
+       *cs++ = id;
+
+       intel_ring_advance(rq, cs);
+
+       i915_request_get(rq);
+       i915_request_add(rq);
+
+       engine->schedule(rq, &attr);
+
+       if (i915_request_wait(rq, 0, HZ / 2) < 0)
+               err = -ETIME;
+       i915_request_put(rq);
+
+       return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_engine_cs *engine;
+       struct i915_vma *global;
+       enum intel_engine_id id;
+       u32 *result;
+       int err = 0;
+
+       if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+               return 0;
+
+       /*
+        * In our other tests, we look at preemption in carefully
+        * controlled conditions in the ringbuffer. Since most of the
+        * time is spent in user batches, most of our preemptions naturally
+        * occur there. We want to verify that when we preempt inside a batch
+        * we continue on from the current instruction and do not roll back
+        * to the start, or another earlier arbitration point.
+        *
+        * To verify this, we create a batch which is a mixture of
+        * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+        * a few preempting contexts thrown into the mix, we look for any
+        * repeated instructions (which show up as incorrect values).
+        */
+
+       global = create_global(gt, 4096);
+       if (IS_ERR(global))
+               return PTR_ERR(global);
+
+       result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+       if (IS_ERR(result)) {
+               i915_vma_unpin_and_release(&global, 0);
+               return PTR_ERR(result);
+       }
+
+       for_each_engine(engine, gt, id) {
+               struct i915_request *client[3] = {};
+               struct igt_live_test t;
+               int i;
+
+               if (!intel_engine_has_preemption(engine))
+                       continue;
+
+               if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+                       continue; /* we need per-context GPR */
+
+               if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+                       err = -EIO;
+                       break;
+               }
+
+               memset(result, 0, 4096);
+
+               for (i = 0; i < ARRAY_SIZE(client); i++) {
+                       struct i915_request *rq;
+
+                       rq = create_gpr_client(engine, global,
+                                              NUM_GPR * i * sizeof(u32));
+                       if (IS_ERR(rq))
+                               goto end_test;
+
+                       client[i] = rq;
+               }
+
+               /* Continuously preempt the set of 3 running contexts */
+               for (i = 1; i <= NUM_GPR; i++) {
+                       err = preempt_user(engine, global, i);
+                       if (err)
+                               goto end_test;
+               }
+
+               if (READ_ONCE(result[0]) != NUM_GPR) {
+                       pr_err("%s: Failed to release semaphore\n",
+                              engine->name);
+                       err = -EIO;
+                       goto end_test;
+               }
+
+               for (i = 0; i < ARRAY_SIZE(client); i++) {
+                       int gpr;
+
+                       if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+                               err = -ETIME;
+                               goto end_test;
+                       }
+
+                       for (gpr = 1; gpr < NUM_GPR; gpr++) {
+                               if (result[NUM_GPR * i + gpr] != 1) {
+                                       pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+                                              engine->name,
+                                              i, gpr, result[NUM_GPR * i + gpr]);
+                                       err = -EINVAL;
+                                       goto end_test;
+                               }
+                       }
+               }
+
+end_test:
+               for (i = 0; i < ARRAY_SIZE(client); i++) {
+                       if (!client[i])
+                               break;
+
+                       i915_request_put(client[i]);
+               }
+
+               /* Flush the semaphores on error */
+               smp_store_mb(result[0], -1);
+               if (igt_live_test_end(&t))
+                       err = -EIO;
+               if (err)
+                       break;
+       }
+
+       i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+       return err;
+}
+
 static int live_preempt_timeout(void *arg)
 {
        struct intel_gt *gt = arg;
@@ -3972,6 +4324,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_chain_preempt),
                SUBTEST(live_preempt_gang),
                SUBTEST(live_preempt_timeout),
+               SUBTEST(live_preempt_user),
                SUBTEST(live_preempt_smoke),
                SUBTEST(live_virtual_engine),
                SUBTEST(live_virtual_mask),
@@ -3989,35 +4342,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
        return intel_gt_live_subtests(tests, &i915->gt);
 }
 
-static void hexdump(const void *buf, size_t len)
-{
-       const size_t rowsize = 8 * sizeof(u32);
-       const void *prev = NULL;
-       bool skip = false;
-       size_t pos;
-
-       for (pos = 0; pos < len; pos += rowsize) {
-               char line[128];
-
-               if (prev && !memcmp(prev, buf + pos, rowsize)) {
-                       if (!skip) {
-                               pr_info("*\n");
-                               skip = true;
-                       }
-                       continue;
-               }
-
-               WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
-                                               rowsize, sizeof(u32),
-                                               line, sizeof(line),
-                                               false) >= sizeof(line));
-               pr_info("[%04zx] %s\n", pos, line);
-
-               prev = buf + pos;
-               skip = false;
-       }
-}
-
 static int emit_semaphore_signal(struct intel_context *ce, void *slot)
 {
        const u32 offset =
@@ -4099,13 +4423,12 @@ static int live_lrc_layout(void *arg)
                if (!engine->default_state)
                        continue;
 
-               hw = i915_gem_object_pin_map(engine->default_state,
-                                            I915_MAP_WB);
+               hw = shmem_pin_map(engine->default_state);
                if (IS_ERR(hw)) {
                        err = PTR_ERR(hw);
                        break;
                }
-               hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+               hw += LRC_STATE_OFFSET / sizeof(*hw);
 
                execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
                                         engine->kernel_context,
@@ -4166,13 +4489,13 @@ static int live_lrc_layout(void *arg)
 
                if (err) {
                        pr_info("%s: HW register image:\n", engine->name);
-                       hexdump(hw, PAGE_SIZE);
+                       igt_hexdump(hw, PAGE_SIZE);
 
                        pr_info("%s: SW register image:\n", engine->name);
-                       hexdump(lrc, PAGE_SIZE);
+                       igt_hexdump(lrc, PAGE_SIZE);
                }
 
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_unpin_map(engine->default_state, hw);
                if (err)
                        break;
        }
@@ -4240,11 +4563,36 @@ static int live_lrc_fixed(void *arg)
                                CTX_BB_STATE - 1,
                                "BB_STATE"
                        },
+                       {
+                               i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
+                               lrc_ring_wa_bb_per_ctx(engine),
+                               "RING_BB_PER_CTX_PTR"
+                       },
+                       {
+                               i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
+                               lrc_ring_indirect_ptr(engine),
+                               "RING_INDIRECT_CTX_PTR"
+                       },
+                       {
+                               i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
+                               lrc_ring_indirect_offset(engine),
+                               "RING_INDIRECT_CTX_OFFSET"
+                       },
                        {
                                i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
                                CTX_TIMESTAMP - 1,
                                "RING_CTX_TIMESTAMP"
                        },
+                       {
+                               i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
+                               lrc_ring_gpr0(engine),
+                               "RING_CS_GPR0"
+                       },
+                       {
+                               i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
+                               lrc_ring_cmd_buf_cctl(engine),
+                               "RING_CMD_BUF_CCTL"
+                       },
                        { },
                }, *t;
                u32 *hw;
@@ -4252,13 +4600,12 @@ static int live_lrc_fixed(void *arg)
                if (!engine->default_state)
                        continue;
 
-               hw = i915_gem_object_pin_map(engine->default_state,
-                                            I915_MAP_WB);
+               hw = shmem_pin_map(engine->default_state);
                if (IS_ERR(hw)) {
                        err = PTR_ERR(hw);
                        break;
                }
-               hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+               hw += LRC_STATE_OFFSET / sizeof(*hw);
 
                for (t = tbl; t->name; t++) {
                        int dw = find_offset(hw, t->reg);
@@ -4274,7 +4621,7 @@ static int live_lrc_fixed(void *arg)
                        }
                }
 
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_unpin_map(engine->default_state, hw);
        }
 
        return err;
@@ -4830,6 +5177,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 {
        struct i915_vma *batch;
        u32 dw, x, *cs, *hw;
+       u32 *defaults;
 
        batch = create_user_vma(ce->vm, SZ_64K);
        if (IS_ERR(batch))
@@ -4841,10 +5189,17 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
                return ERR_CAST(cs);
        }
 
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               i915_gem_object_unpin_map(batch->obj);
+               i915_vma_put(batch);
+               return ERR_PTR(-ENOMEM);
+       }
+
        x = 0;
        dw = 0;
-       hw = ce->engine->pinned_default_state;
-       hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+       hw = defaults;
+       hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
 
@@ -4874,6 +5229,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 
        *cs++ = MI_BATCH_BUFFER_END;
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+
        i915_gem_object_flush_map(batch->obj);
        i915_gem_object_unpin_map(batch->obj);
 
@@ -4984,6 +5341,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 {
        struct i915_vma *batch;
        u32 dw, *cs, *hw;
+       u32 *defaults;
 
        batch = create_user_vma(ce->vm, SZ_64K);
        if (IS_ERR(batch))
@@ -4995,9 +5353,16 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
                return ERR_CAST(cs);
        }
 
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               i915_gem_object_unpin_map(batch->obj);
+               i915_vma_put(batch);
+               return ERR_PTR(-ENOMEM);
+       }
+
        dw = 0;
-       hw = ce->engine->pinned_default_state;
-       hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+       hw = defaults;
+       hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
 
@@ -5024,6 +5389,8 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 
        *cs++ = MI_BATCH_BUFFER_END;
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+
        i915_gem_object_flush_map(batch->obj);
        i915_gem_object_unpin_map(batch->obj);
 
@@ -5091,6 +5458,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
 {
        u32 x, dw, *hw, *lrc;
        u32 *A[2], *B[2];
+       u32 *defaults;
        int err = 0;
 
        A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
@@ -5121,12 +5489,18 @@ static int compare_isolation(struct intel_engine_cs *engine,
                err = PTR_ERR(lrc);
                goto err_B1;
        }
-       lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+       lrc += LRC_STATE_OFFSET / sizeof(*hw);
+
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               err = -ENOMEM;
+               goto err_lrc;
+       }
 
        x = 0;
        dw = 0;
-       hw = engine->pinned_default_state;
-       hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+       hw = defaults;
+       hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
 
@@ -5157,7 +5531,6 @@ static int compare_isolation(struct intel_engine_cs *engine,
                                               A[0][x], B[0][x], B[1][x],
                                               poison, lrc[dw + 1]);
                                        err = -EINVAL;
-                                       break;
                                }
                        }
                        dw += 2;
@@ -5166,6 +5539,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
        } while (dw < PAGE_SIZE / sizeof(u32) &&
                 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
        i915_gem_object_unpin_map(ce->state->obj);
 err_B1:
        i915_gem_object_unpin_map(result[1]->obj);
@@ -5296,6 +5671,7 @@ static int live_lrc_isolation(void *arg)
                0xffffffff,
                0xffff0000,
        };
+       int err = 0;
 
        /*
         * Our goal is try and verify that per-context state cannot be
@@ -5306,7 +5682,6 @@ static int live_lrc_isolation(void *arg)
         */
 
        for_each_engine(engine, gt, id) {
-               int err = 0;
                int i;
 
                /* Just don't even ask */
@@ -5315,25 +5690,180 @@ static int live_lrc_isolation(void *arg)
                        continue;
 
                intel_engine_pm_get(engine);
-               if (engine->pinned_default_state) {
-                       for (i = 0; i < ARRAY_SIZE(poison); i++) {
-                               err = __lrc_isolation(engine, poison[i]);
-                               if (err)
-                                       break;
+               for (i = 0; i < ARRAY_SIZE(poison); i++) {
+                       int result;
 
-                               err = __lrc_isolation(engine, ~poison[i]);
-                               if (err)
-                                       break;
-                       }
+                       result = __lrc_isolation(engine, poison[i]);
+                       if (result && !err)
+                               err = result;
+
+                       result = __lrc_isolation(engine, ~poison[i]);
+                       if (result && !err)
+                               err = result;
                }
                intel_engine_pm_put(engine);
+               if (igt_flush_test(gt->i915)) {
+                       err = -EIO;
+                       break;
+               }
+       }
+
+       return err;
+}
+
+static int indirect_ctx_submit_req(struct intel_context *ce)
+{
+       struct i915_request *rq;
+       int err = 0;
+
+       rq = intel_context_create_request(ce);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       i915_request_get(rq);
+       i915_request_add(rq);
+
+       if (i915_request_wait(rq, 0, HZ / 5) < 0)
+               err = -ETIME;
+
+       i915_request_put(rq);
+
+       return err;
+}
+
+#define CTX_BB_CANARY_OFFSET (3 * 1024)
+#define CTX_BB_CANARY_INDEX  (CTX_BB_CANARY_OFFSET / sizeof(u32))
+
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+       *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+               MI_SRM_LRM_GLOBAL_GTT |
+               MI_LRI_LRM_CS_MMIO;
+       *cs++ = i915_mmio_reg_offset(RING_START(0));
+       *cs++ = i915_ggtt_offset(ce->state) +
+               context_wa_bb_offset(ce) +
+               CTX_BB_CANARY_OFFSET;
+       *cs++ = 0;
+
+       return cs;
+}
+
+static void
+indirect_ctx_bb_setup(struct intel_context *ce)
+{
+       u32 *cs = context_indirect_bb(ce);
+
+       cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
+
+       setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+}
+
+static bool check_ring_start(struct intel_context *ce)
+{
+       const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
+               LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+
+       if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
+               return true;
+
+       pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
+              ctx_bb[CTX_BB_CANARY_INDEX],
+              ce->lrc_reg_state[CTX_RING_START]);
+
+       return false;
+}
+
+static int indirect_ctx_bb_check(struct intel_context *ce)
+{
+       int err;
+
+       err = indirect_ctx_submit_req(ce);
+       if (err)
+               return err;
+
+       if (!check_ring_start(ce))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+{
+       struct intel_context *a, *b;
+       int err;
+
+       a = intel_context_create(engine);
+       if (IS_ERR(a))
+               return PTR_ERR(a);
+       err = intel_context_pin(a);
+       if (err)
+               goto put_a;
+
+       b = intel_context_create(engine);
+       if (IS_ERR(b)) {
+               err = PTR_ERR(b);
+               goto unpin_a;
+       }
+       err = intel_context_pin(b);
+       if (err)
+               goto put_b;
+
+       /* We use the already reserved extra page in context state */
+       if (!a->wa_bb_page) {
+               GEM_BUG_ON(b->wa_bb_page);
+               GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
+               goto unpin_b;
+       }
+
+       /*
+        * In order to test that our per context bb is truly per context,
+        * and executes at the intended spot on context restoring process,
+        * make the batch store the ring start value to memory.
+        * As ring start is restored apriori of starting the indirect ctx bb and
+        * as it will be different for each context, it fits to this purpose.
+        */
+       indirect_ctx_bb_setup(a);
+       indirect_ctx_bb_setup(b);
+
+       err = indirect_ctx_bb_check(a);
+       if (err)
+               goto unpin_b;
+
+       err = indirect_ctx_bb_check(b);
+
+unpin_b:
+       intel_context_unpin(b);
+put_b:
+       intel_context_put(b);
+unpin_a:
+       intel_context_unpin(a);
+put_a:
+       intel_context_put(a);
+
+       return err;
+}
+
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err = 0;
+
+       for_each_engine(engine, gt, id) {
+               intel_engine_pm_get(engine);
+               err = __live_lrc_indirect_ctx_bb(engine);
+               intel_engine_pm_put(engine);
+
                if (igt_flush_test(gt->i915))
                        err = -EIO;
+
                if (err)
-                       return err;
+                       break;
        }
 
-       return 0;
+       return err;
 }
 
 static void garbage_reset(struct intel_engine_cs *engine,
@@ -5367,7 +5897,7 @@ static struct i915_request *garbage(struct intel_context *ce,
        prandom_bytes_state(prng,
                            ce->lrc_reg_state,
                            ce->engine->context_size -
-                           LRC_STATE_PN * PAGE_SIZE);
+                           LRC_STATE_OFFSET);
 
        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
@@ -5571,6 +6101,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_lrc_timestamp),
                SUBTEST(live_lrc_garbage),
                SUBTEST(live_pphwsp_runtime),
+               SUBTEST(live_lrc_indirect_ctx_bb),
        };
 
        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
index 95b165faeba78f2fe39dfa92781f1a3c37e73ddf..2dc460624bbc4cf71e3934aa3766658fcbb96555 100644 (file)
@@ -11,6 +11,7 @@
 #include "selftest_rc6.h"
 
 #include "selftests/i915_random.h"
+#include "selftests/librapl.h"
 
 static u64 rc6_residency(struct intel_rc6 *rc6)
 {
@@ -31,7 +32,9 @@ int live_rc6_manual(void *arg)
 {
        struct intel_gt *gt = arg;
        struct intel_rc6 *rc6 = &gt->rc6;
+       u64 rc0_power, rc6_power;
        intel_wakeref_t wakeref;
+       ktime_t dt;
        u64 res[2];
        int err = 0;
 
@@ -54,7 +57,12 @@ int live_rc6_manual(void *arg)
        msleep(1); /* wakeup is not immediate, takes about 100us on icl */
 
        res[0] = rc6_residency(rc6);
+
+       dt = ktime_get();
+       rc0_power = librapl_energy_uJ();
        msleep(250);
+       rc0_power = librapl_energy_uJ() - rc0_power;
+       dt = ktime_sub(ktime_get(), dt);
        res[1] = rc6_residency(rc6);
        if ((res[1] - res[0]) >> 10) {
                pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
@@ -63,13 +71,24 @@ int live_rc6_manual(void *arg)
                goto out_unlock;
        }
 
+       rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
+       if (!rc0_power) {
+               pr_err("No power measured while in RC0\n");
+               err = -EINVAL;
+               goto out_unlock;
+       }
+
        /* Manually enter RC6 */
        intel_rc6_park(rc6);
 
        res[0] = rc6_residency(rc6);
+       intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+       dt = ktime_get();
+       rc6_power = librapl_energy_uJ();
        msleep(100);
+       rc6_power = librapl_energy_uJ() - rc6_power;
+       dt = ktime_sub(ktime_get(), dt);
        res[1] = rc6_residency(rc6);
-
        if (res[1] == res[0]) {
                pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
                       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
@@ -78,6 +97,15 @@ int live_rc6_manual(void *arg)
                err = -EINVAL;
        }
 
+       rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
+       pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+               rc0_power, rc6_power);
+       if (2 * rc6_power > rc0_power) {
+               pr_err("GPU leaked energy while in RC6!\n");
+               err = -EINVAL;
+               goto out_unlock;
+       }
+
        /* Restore what should have been the original state! */
        intel_rc6_unpark(rc6);
 
index 9995faadd7e81b6a0dc5ed58baa0ab4b0d020f90..3350e7c995bccff92751c8b448f81976d0c6a4c3 100644 (file)
@@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
        *cs++ = STACK_MAGIC;
 
        *cs++ = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        vma->private = intel_context_create(engine); /* dummy residuals */
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
new file mode 100644 (file)
index 0000000..6275d69
--- /dev/null
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/sort.h>
+
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/librapl.h"
+
+/* Try to isolate the impact of cstates from determing frequency response */
+#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
+
+static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+       unsigned long old;
+
+       old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+
+       intel_engine_pm_get(engine);
+       intel_engine_park_heartbeat(engine);
+
+       return old;
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+                                   unsigned long saved)
+{
+       intel_engine_pm_put(engine);
+
+       engine->props.heartbeat_interval_ms = saved;
+}
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int cmp_u64(const void *A, const void *B)
+{
+       const u64 *a = A, *b = B;
+
+       if (a < b)
+               return -1;
+       else if (a > b)
+               return 1;
+       else
+               return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+       const u32 *a = A, *b = B;
+
+       if (a < b)
+               return -1;
+       else if (a > b)
+               return 1;
+       else
+               return 0;
+}
+
+static struct i915_vma *
+create_spin_counter(struct intel_engine_cs *engine,
+                   struct i915_address_space *vm,
+                   bool srm,
+                   u32 **cancel,
+                   u32 **counter)
+{
+       enum {
+               COUNT,
+               INC,
+               __NGPR__,
+       };
+#define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x)
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       unsigned long end;
+       u32 *base, *cs;
+       int loop, i;
+       int err;
+
+       obj = i915_gem_object_create_internal(vm->i915, 64 << 10);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       end = obj->base.size / sizeof(u32) - 1;
+
+       vma = i915_vma_instance(obj, vm, NULL);
+       if (IS_ERR(vma)) {
+               i915_gem_object_put(obj);
+               return vma;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err) {
+               i915_vma_put(vma);
+               return ERR_PTR(err);
+       }
+
+       base = i915_gem_object_pin_map(obj, I915_MAP_WC);
+       if (IS_ERR(base)) {
+               i915_gem_object_put(obj);
+               return ERR_CAST(base);
+       }
+       cs = base;
+
+       *cs++ = MI_LOAD_REGISTER_IMM(__NGPR__ * 2);
+       for (i = 0; i < __NGPR__; i++) {
+               *cs++ = i915_mmio_reg_offset(CS_GPR(i));
+               *cs++ = 0;
+               *cs++ = i915_mmio_reg_offset(CS_GPR(i)) + 4;
+               *cs++ = 0;
+       }
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(CS_GPR(INC));
+       *cs++ = 1;
+
+       loop = cs - base;
+
+       /* Unroll the loop to avoid MI_BB_START stalls impacting measurements */
+       for (i = 0; i < 1024; i++) {
+               *cs++ = MI_MATH(4);
+               *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT));
+               *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC));
+               *cs++ = MI_MATH_ADD;
+               *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU);
+
+               if (srm) {
+                       *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+                       *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
+                       *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
+                       *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+               }
+       }
+
+       *cs++ = MI_BATCH_BUFFER_START_GEN8;
+       *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
+       *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+       GEM_BUG_ON(cs - base > end);
+
+       i915_gem_object_flush_map(obj);
+
+       *cancel = base + loop;
+       *counter = srm ? memset32(base + end, 0, 1) : NULL;
+       return vma;
+}
+
+static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
+{
+       u8 history[64], i;
+       unsigned long end;
+       int sleep;
+
+       i = 0;
+       memset(history, freq, sizeof(history));
+       sleep = 20;
+
+       /* The PCU does not change instantly, but drifts towards the goal? */
+       end = jiffies + msecs_to_jiffies(timeout_ms);
+       do {
+               u8 act;
+
+               act = read_cagf(rps);
+               if (time_after(jiffies, end))
+                       return act;
+
+               /* Target acquired */
+               if (act == freq)
+                       return act;
+
+               /* Any change within the last N samples? */
+               if (!memchr_inv(history, act, sizeof(history)))
+                       return act;
+
+               history[i] = act;
+               i = (i + 1) % ARRAY_SIZE(history);
+
+               usleep_range(sleep, 2 * sleep);
+               sleep *= 2;
+               if (sleep > timeout_ms * 20)
+                       sleep = timeout_ms * 20;
+       } while (1);
+}
+
+static u8 rps_set_check(struct intel_rps *rps, u8 freq)
+{
+       mutex_lock(&rps->lock);
+       GEM_BUG_ON(!intel_rps_is_active(rps));
+       intel_rps_set(rps, freq);
+       GEM_BUG_ON(rps->last_freq != freq);
+       mutex_unlock(&rps->lock);
+
+       return wait_for_freq(rps, freq, 50);
+}
+
+static void show_pstate_limits(struct intel_rps *rps)
+{
+       struct drm_i915_private *i915 = rps_to_i915(rps);
+
+       if (IS_BROXTON(i915)) {
+               pr_info("P_STATE_CAP[%x]: 0x%08x\n",
+                       i915_mmio_reg_offset(BXT_RP_STATE_CAP),
+                       intel_uncore_read(rps_to_uncore(rps),
+                                         BXT_RP_STATE_CAP));
+       } else if (IS_GEN(i915, 9)) {
+               pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
+                       i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
+                       intel_uncore_read(rps_to_uncore(rps),
+                                         GEN9_RP_STATE_LIMITS));
+       }
+}
+
+int live_rps_clock_interval(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       intel_gt_pm_get(gt);
+       intel_rps_disable(&gt->rps);
+
+       intel_gt_check_clock_frequency(gt);
+
+       for_each_engine(engine, gt, id) {
+               unsigned long saved_heartbeat;
+               struct i915_request *rq;
+               u32 cycles;
+               u64 dt;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               saved_heartbeat = engine_heartbeat_disable(engine);
+
+               rq = igt_spinner_create_request(&spin,
+                                               engine->kernel_context,
+                                               MI_NOOP);
+               if (IS_ERR(rq)) {
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_add(rq);
+
+               if (!igt_wait_for_spinner(&spin, rq)) {
+                       pr_err("%s: RPS spinner did not start\n",
+                              engine->name);
+                       igt_spinner_end(&spin);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       intel_gt_set_wedged(engine->gt);
+                       err = -EIO;
+                       break;
+               }
+
+               intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+               intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0);
+
+               /* Set the evaluation interval to infinity! */
+               intel_uncore_write_fw(gt->uncore,
+                                     GEN6_RP_UP_EI, 0xffffffff);
+               intel_uncore_write_fw(gt->uncore,
+                                     GEN6_RP_UP_THRESHOLD, 0xffffffff);
+
+               intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL,
+                                     GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG);
+
+               if (wait_for(intel_uncore_read_fw(gt->uncore,
+                                                 GEN6_RP_CUR_UP_EI),
+                            10)) {
+                       /* Just skip the test; assume lack of HW support */
+                       pr_notice("%s: rps evaluation interval not ticking\n",
+                                 engine->name);
+                       err = -ENODEV;
+               } else {
+                       ktime_t dt_[5];
+                       u32 cycles_[5];
+                       int i;
+
+                       for (i = 0; i < 5; i++) {
+                               preempt_disable();
+
+                               dt_[i] = ktime_get();
+                               cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+                               udelay(1000);
+
+                               dt_[i] = ktime_sub(ktime_get(), dt_[i]);
+                               cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+                               preempt_enable();
+                       }
+
+                       /* Use the median of both cycle/dt; close enough */
+                       sort(cycles_, 5, sizeof(*cycles_), cmp_u32, NULL);
+                       cycles = (cycles_[1] + 2 * cycles_[2] + cycles_[3]) / 4;
+                       sort(dt_, 5, sizeof(*dt_), cmp_u64, NULL);
+                       dt = div_u64(dt_[1] + 2 * dt_[2] + dt_[3], 4);
+               }
+
+               intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0);
+               intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+               igt_spinner_end(&spin);
+               engine_heartbeat_enable(engine, saved_heartbeat);
+
+               if (err == 0) {
+                       u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
+                       u32 expected =
+                               intel_gt_ns_to_pm_interval(gt, dt);
+
+                       pr_info("%s: rps counted %d C0 cycles [%lldns] in %lldns [%d cycles], using GT clock frequency of %uKHz\n",
+                               engine->name, cycles, time, dt, expected,
+                               gt->clock_frequency / 1000);
+
+                       if (10 * time < 8 * dt ||
+                           8 * time > 10 * dt) {
+                               pr_err("%s: rps clock time does not match walltime!\n",
+                                      engine->name);
+                               err = -EINVAL;
+                       }
+
+                       if (10 * expected < 8 * cycles ||
+                           8 * expected > 10 * cycles) {
+                               pr_err("%s: walltime does not match rps clock ticks!\n",
+                                      engine->name);
+                               err = -EINVAL;
+                       }
+               }
+
+               if (igt_flush_test(gt->i915))
+                       err = -EIO;
+
+               break; /* once is enough */
+       }
+
+       intel_rps_enable(&gt->rps);
+       intel_gt_pm_put(gt);
+
+       igt_spinner_fini(&spin);
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       if (err == -ENODEV) /* skipped, don't report a fail */
+               err = 0;
+
+       return err;
+}
+
+int live_rps_control(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+
+       /*
+        * Check that the actual frequency matches our requested frequency,
+        * to verify our control mechanism. We have to be careful that the
+        * PCU may throttle the GPU in which case the actual frequency used
+        * will be lowered than requested.
+        */
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       intel_gt_pm_get(gt);
+       for_each_engine(engine, gt, id) {
+               unsigned long saved_heartbeat;
+               struct i915_request *rq;
+               ktime_t min_dt, max_dt;
+               int f, limit;
+               int min, max;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               saved_heartbeat = engine_heartbeat_disable(engine);
+
+               rq = igt_spinner_create_request(&spin,
+                                               engine->kernel_context,
+                                               MI_NOOP);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_add(rq);
+
+               if (!igt_wait_for_spinner(&spin, rq)) {
+                       pr_err("%s: RPS spinner did not start\n",
+                              engine->name);
+                       igt_spinner_end(&spin);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       intel_gt_set_wedged(engine->gt);
+                       err = -EIO;
+                       break;
+               }
+
+               if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+                       pr_err("%s: could not set minimum frequency [%x], only %x!\n",
+                              engine->name, rps->min_freq, read_cagf(rps));
+                       igt_spinner_end(&spin);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       show_pstate_limits(rps);
+                       err = -EINVAL;
+                       break;
+               }
+
+               for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
+                       if (rps_set_check(rps, f) < f)
+                               break;
+               }
+
+               limit = rps_set_check(rps, f);
+
+               if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+                       pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
+                              engine->name, rps->min_freq, read_cagf(rps));
+                       igt_spinner_end(&spin);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       show_pstate_limits(rps);
+                       err = -EINVAL;
+                       break;
+               }
+
+               max_dt = ktime_get();
+               max = rps_set_check(rps, limit);
+               max_dt = ktime_sub(ktime_get(), max_dt);
+
+               min_dt = ktime_get();
+               min = rps_set_check(rps, rps->min_freq);
+               min_dt = ktime_sub(ktime_get(), min_dt);
+
+               igt_spinner_end(&spin);
+               engine_heartbeat_enable(engine, saved_heartbeat);
+
+               pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
+                       engine->name,
+                       rps->min_freq, intel_gpu_freq(rps, rps->min_freq),
+                       rps->max_freq, intel_gpu_freq(rps, rps->max_freq),
+                       limit, intel_gpu_freq(rps, limit),
+                       min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
+
+               if (limit == rps->min_freq) {
+                       pr_err("%s: GPU throttled to minimum!\n",
+                              engine->name);
+                       show_pstate_limits(rps);
+                       err = -ENODEV;
+                       break;
+               }
+
+               if (igt_flush_test(gt->i915)) {
+                       err = -EIO;
+                       break;
+               }
+       }
+       intel_gt_pm_put(gt);
+
+       igt_spinner_fini(&spin);
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       return err;
+}
+
+/*
+ * show_pcu_config - dump the PCU's GPU-to-effective-CPU/ring frequency table
+ *
+ * For each GPU frequency in the current [min, max] range, read the
+ * GEN6_PCODE_READ_MIN_FREQ_TABLE mailbox and print the effective CPU and
+ * ring frequencies the PCU pairs with it. Only done on LLC platforms
+ * (hence the HAS_LLC() early-out). A runtime-pm wakeref is held around
+ * the pcode accesses.
+ */
+static void show_pcu_config(struct intel_rps *rps)
+{
+       struct drm_i915_private *i915 = rps_to_i915(rps);
+       unsigned int max_gpu_freq, min_gpu_freq;
+       intel_wakeref_t wakeref;
+       int gpu_freq;
+
+       if (!HAS_LLC(i915))
+               return;
+
+       min_gpu_freq = rps->min_freq;
+       max_gpu_freq = rps->max_freq;
+       if (INTEL_GEN(i915) >= 9) {
+               /* Convert GT frequency to 50 HZ units */
+               min_gpu_freq /= GEN9_FREQ_SCALER;
+               max_gpu_freq /= GEN9_FREQ_SCALER;
+       }
+
+       wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);
+
+       pr_info("%5s  %5s  %5s\n", "GPU", "eCPU", "eRing");
+       for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+               int ia_freq = gpu_freq;
+
+               sandybridge_pcode_read(i915,
+                                      GEN6_PCODE_READ_MIN_FREQ_TABLE,
+                                      &ia_freq, NULL);
+
+               /* GPU entries are in 50MHz units, table entries in 100MHz */
+               pr_info("%5d  %5d  %5d\n",
+                       gpu_freq * 50,
+                       ((ia_freq >> 0) & 0xff) * 100,
+                       ((ia_freq >> 8) & 0xff) * 100);
+       }
+
+       intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);
+}
+
+/*
+ * Sample the delta of *cntr over roughly @duration_ms and return the rate
+ * in increments per millisecond (if the counter ticks once per clock,
+ * this is the clock rate in kHz).
+ */
+static u64 __measure_frequency(u32 *cntr, int duration_ms)
+{
+       u64 dc, dt;
+
+       dt = ktime_get();
+       dc = READ_ONCE(*cntr);
+       usleep_range(1000 * duration_ms, 2000 * duration_ms);
+       dc = READ_ONCE(*cntr) - dc;
+       dt = ktime_get() - dt;
+
+       /* dt is in ns; scale so the result is ticks per millisecond */
+       return div64_u64(1000 * 1000 * dc, dt);
+}
+
+/*
+ * Pin the GPU to *freq (as accepted by rps_set_check()), take 5 samples
+ * of the memory-backed counter rate and return a triangle-filtered
+ * average for stability. *freq is updated to the mean of the requested
+ * frequency and the actual frequency (CAGF) observed afterwards.
+ */
+static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq)
+{
+       u64 x[5];
+       int i;
+
+       *freq = rps_set_check(rps, *freq);
+       for (i = 0; i < 5; i++)
+               x[i] = __measure_frequency(cntr, 2);
+       *freq = (*freq + read_cagf(rps)) / 2;
+
+       /* A simple triangle filter for better result stability */
+       sort(x, 5, sizeof(*x), cmp_u64, NULL);
+       return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+/*
+ * Like __measure_frequency(), but samples the CS_GPR(0) register of
+ * @engine (incremented by the spinning batch) via an unserialised
+ * uncore read; returns increments per millisecond.
+ */
+static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
+                                 int duration_ms)
+{
+       u64 dc, dt;
+
+       dt = ktime_get();
+       dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+       usleep_range(1000 * duration_ms, 2000 * duration_ms);
+       dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
+       dt = ktime_get() - dt;
+
+       return div64_u64(1000 * 1000 * dc, dt);
+}
+
+/*
+ * Pin the GPU to *freq, take 5 samples of the CS_GPR(0) tick rate and
+ * return a triangle-filtered average. *freq is updated to the mean of
+ * the request and the actual frequency (CAGF) observed afterwards.
+ */
+static u64 measure_cs_frequency_at(struct intel_rps *rps,
+                                  struct intel_engine_cs *engine,
+                                  int *freq)
+{
+       u64 x[5];
+       int i;
+
+       *freq = rps_set_check(rps, *freq);
+       for (i = 0; i < 5; i++)
+               x[i] = __measure_cs_frequency(engine, 2);
+       *freq = (*freq + read_cagf(rps)) / 2;
+
+       /* A simple triangle filter for better result stability */
+       sort(x, 5, sizeof(*x), cmp_u64, NULL);
+       return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+/*
+ * Check that the ratio x/y lies strictly inside the tolerance window
+ * (f_n/f_d, f_d/f_n), i.e. x and y scale together within the given
+ * fraction.
+ */
+static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d)
+{
+       const bool above_lower_bound = f_d * x > f_n * y;
+       const bool below_upper_bound = f_n * x < f_d * y;
+
+       return above_lower_bound && below_upper_bound;
+}
+
+/*
+ * live_rps_frequency_cs - verify the CS clock scales with the requested
+ * RPS frequency, by timing a busy-spinning batch that increments
+ * CS_GPR(0) at min and max frequency and comparing the tick rates.
+ *
+ * Fix: the timed-loop startup timeout previously printed an error but
+ * left err == 0, so the failure was silently dropped; it now returns
+ * -EIO.
+ */
+int live_rps_frequency_cs(void *arg)
+{
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       struct intel_engine_cs *engine;
+       struct pm_qos_request qos;
+       enum intel_engine_id id;
+       int err = 0;
+
+       /*
+        * The premise is that the GPU does change frequency at our behest.
+        * Let's check there is a correspondence between the requested
+        * frequency, the actual frequency, and the observed clock rate.
+        */
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+               return 0;
+
+       if (CPU_LATENCY >= 0)
+               cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       for_each_engine(engine, gt, id) {
+               unsigned long saved_heartbeat;
+               struct i915_request *rq;
+               struct i915_vma *vma;
+               u32 *cancel, *cntr;
+               struct {
+                       u64 count;
+                       int freq;
+               } min, max;
+
+               saved_heartbeat = engine_heartbeat_disable(engine);
+
+               vma = create_spin_counter(engine,
+                                         engine->kernel_context->vm, false,
+                                         &cancel, &cntr);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       break;
+               }
+
+               rq = intel_engine_create_kernel_request(engine);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto err_vma;
+               }
+
+               i915_vma_lock(vma);
+               err = i915_request_await_object(rq, vma->obj, false);
+               if (!err)
+                       err = i915_vma_move_to_active(vma, rq, 0);
+               if (!err)
+                       err = rq->engine->emit_bb_start(rq,
+                                                       vma->node.start,
+                                                       PAGE_SIZE, 0);
+               i915_vma_unlock(vma);
+               i915_request_add(rq);
+               if (err)
+                       goto err_vma;
+
+               if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)),
+                            10)) {
+                       pr_err("%s: timed loop did not start\n",
+                              engine->name);
+                       err = -EIO; /* previously lost; report the failure */
+                       goto err_vma;
+               }
+
+               min.freq = rps->min_freq;
+               min.count = measure_cs_frequency_at(rps, engine, &min.freq);
+
+               max.freq = rps->max_freq;
+               max.count = measure_cs_frequency_at(rps, engine, &max.freq);
+
+               pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+                       engine->name,
+                       min.count, intel_gpu_freq(rps, min.freq),
+                       max.count, intel_gpu_freq(rps, max.freq),
+                       (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+                                                    max.freq * min.count));
+
+               /* Expect the tick rate to scale with frequency within 2:3 */
+               if (!scaled_within(max.freq * min.count,
+                                  min.freq * max.count,
+                                  2, 3)) {
+                       int f;
+
+                       pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+                              engine->name,
+                              max.freq * min.count,
+                              min.freq * max.count);
+                       show_pcu_config(rps);
+
+                       /* Sweep the range to show where scaling breaks down */
+                       for (f = min.freq + 1; f <= rps->max_freq; f++) {
+                               int act = f;
+                               u64 count;
+
+                               count = measure_cs_frequency_at(rps, engine, &act);
+                               if (act < f)
+                                       break;
+
+                               pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+                                       engine->name,
+                                       act, intel_gpu_freq(rps, act), count,
+                                       (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+                                                                    act * min.count));
+
+                               f = act; /* may skip ahead [pcu granularity] */
+                       }
+
+                       err = -EINVAL;
+               }
+
+err_vma:
+               *cancel = MI_BATCH_BUFFER_END;
+               i915_gem_object_flush_map(vma->obj);
+               i915_gem_object_unpin_map(vma->obj);
+               i915_vma_unpin(vma);
+               i915_vma_put(vma);
+
+               engine_heartbeat_enable(engine, saved_heartbeat);
+               if (igt_flush_test(gt->i915))
+                       err = -EIO;
+               if (err)
+                       break;
+       }
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       if (CPU_LATENCY >= 0)
+               cpu_latency_qos_remove_request(&qos);
+
+       return err;
+}
+
+/*
+ * live_rps_frequency_srm - verify the CS clock scales with the requested
+ * RPS frequency, like live_rps_frequency_cs() but sampling the counter
+ * via store-to-memory (SRM) through the CPU-visible *cntr rather than
+ * MMIO reads of CS_GPR(0).
+ *
+ * Fix: the timed-loop startup timeout previously printed an error but
+ * left err == 0, so the failure was silently dropped; it now returns
+ * -EIO.
+ */
+int live_rps_frequency_srm(void *arg)
+{
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       struct intel_engine_cs *engine;
+       struct pm_qos_request qos;
+       enum intel_engine_id id;
+       int err = 0;
+
+       /*
+        * The premise is that the GPU does change frequency at our behest.
+        * Let's check there is a correspondence between the requested
+        * frequency, the actual frequency, and the observed clock rate.
+        */
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+               return 0;
+
+       if (CPU_LATENCY >= 0)
+               cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       for_each_engine(engine, gt, id) {
+               unsigned long saved_heartbeat;
+               struct i915_request *rq;
+               struct i915_vma *vma;
+               u32 *cancel, *cntr;
+               struct {
+                       u64 count;
+                       int freq;
+               } min, max;
+
+               saved_heartbeat = engine_heartbeat_disable(engine);
+
+               vma = create_spin_counter(engine,
+                                         engine->kernel_context->vm, true,
+                                         &cancel, &cntr);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       break;
+               }
+
+               rq = intel_engine_create_kernel_request(engine);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto err_vma;
+               }
+
+               i915_vma_lock(vma);
+               err = i915_request_await_object(rq, vma->obj, false);
+               if (!err)
+                       err = i915_vma_move_to_active(vma, rq, 0);
+               if (!err)
+                       err = rq->engine->emit_bb_start(rq,
+                                                       vma->node.start,
+                                                       PAGE_SIZE, 0);
+               i915_vma_unlock(vma);
+               i915_request_add(rq);
+               if (err)
+                       goto err_vma;
+
+               if (wait_for(READ_ONCE(*cntr), 10)) {
+                       pr_err("%s: timed loop did not start\n",
+                              engine->name);
+                       err = -EIO; /* previously lost; report the failure */
+                       goto err_vma;
+               }
+
+               min.freq = rps->min_freq;
+               min.count = measure_frequency_at(rps, cntr, &min.freq);
+
+               max.freq = rps->max_freq;
+               max.count = measure_frequency_at(rps, cntr, &max.freq);
+
+               pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+                       engine->name,
+                       min.count, intel_gpu_freq(rps, min.freq),
+                       max.count, intel_gpu_freq(rps, max.freq),
+                       (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+                                                    max.freq * min.count));
+
+               /* Expect the tick rate to scale with frequency within 1:2 */
+               if (!scaled_within(max.freq * min.count,
+                                  min.freq * max.count,
+                                  1, 2)) {
+                       int f;
+
+                       pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+                              engine->name,
+                              max.freq * min.count,
+                              min.freq * max.count);
+                       show_pcu_config(rps);
+
+                       /* Sweep the range to show where scaling breaks down */
+                       for (f = min.freq + 1; f <= rps->max_freq; f++) {
+                               int act = f;
+                               u64 count;
+
+                               count = measure_frequency_at(rps, cntr, &act);
+                               if (act < f)
+                                       break;
+
+                               pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+                                       engine->name,
+                                       act, intel_gpu_freq(rps, act), count,
+                                       (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+                                                                    act * min.count));
+
+                               f = act; /* may skip ahead [pcu granularity] */
+                       }
+
+                       err = -EINVAL;
+               }
+
+err_vma:
+               *cancel = MI_BATCH_BUFFER_END;
+               i915_gem_object_flush_map(vma->obj);
+               i915_gem_object_unpin_map(vma->obj);
+               i915_vma_unpin(vma);
+               i915_vma_put(vma);
+
+               engine_heartbeat_enable(engine, saved_heartbeat);
+               if (igt_flush_test(gt->i915))
+                       err = -EIO;
+               if (err)
+                       break;
+       }
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       if (CPU_LATENCY >= 0)
+               cpu_latency_qos_remove_request(&qos);
+
+       return err;
+}
+
+/*
+ * Wait out any in-flight evaluation interval (EI), reset the RPS
+ * interrupt bookkeeping (pm_iir must be clear afterwards), then sleep
+ * across at least one full EI so a fresh threshold interrupt can be
+ * generated and latched.
+ */
+static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
+{
+       /* Flush any previous EI */
+       usleep_range(timeout_us, 2 * timeout_us);
+
+       /* Reset the interrupt status */
+       rps_disable_interrupts(rps);
+       GEM_BUG_ON(rps->pm_iir);
+       rps_enable_interrupts(rps);
+
+       /* And then wait for the timeout, for real this time */
+       usleep_range(2 * timeout_us, 3 * timeout_us);
+}
+
+/*
+ * Keep @engine busy with a spinner at the minimum frequency and wait
+ * out an UP evaluation interval; then verify the frequency was left
+ * untouched (the RPS worker is stubbed out by the caller) and that an
+ * UP-threshold interrupt was latched into rps->pm_iir.
+ *
+ * Returns 0 on success (or if the engine cannot store a dword), a
+ * negative error code on failure.
+ */
+static int __rps_up_interrupt(struct intel_rps *rps,
+                             struct intel_engine_cs *engine,
+                             struct igt_spinner *spin)
+{
+       struct intel_uncore *uncore = engine->uncore;
+       struct i915_request *rq;
+       u32 timeout;
+
+       if (!intel_engine_can_store_dword(engine))
+               return 0;
+
+       rps_set_check(rps, rps->min_freq);
+
+       rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       i915_request_get(rq);
+       i915_request_add(rq);
+
+       if (!igt_wait_for_spinner(spin, rq)) {
+               pr_err("%s: RPS spinner did not start\n",
+                      engine->name);
+               i915_request_put(rq);
+               intel_gt_set_wedged(engine->gt);
+               return -EIO;
+       }
+
+       if (!intel_rps_is_active(rps)) {
+               pr_err("%s: RPS not enabled on starting spinner\n",
+                      engine->name);
+               igt_spinner_end(spin);
+               i915_request_put(rq);
+               return -EINVAL;
+       }
+
+       if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+               pr_err("%s: RPS did not register UP interrupt\n",
+                      engine->name);
+               i915_request_put(rq);
+               return -EINVAL;
+       }
+
+       if (rps->last_freq != rps->min_freq) {
+               pr_err("%s: RPS did not program min frequency\n",
+                      engine->name);
+               i915_request_put(rq);
+               return -EINVAL;
+       }
+
+       /* Convert the HW EI register value into a sleep time in us */
+       timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+       timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+       timeout = DIV_ROUND_UP(timeout, 1000);
+
+       sleep_for_ei(rps, timeout);
+       GEM_BUG_ON(i915_request_completed(rq));
+
+       igt_spinner_end(spin);
+       i915_request_put(rq);
+
+       if (rps->cur_freq != rps->min_freq) {
+               pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+                      engine->name, intel_rps_read_actual_frequency(rps));
+               return -EINVAL;
+       }
+
+       if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+               pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+                      engine->name, rps->pm_iir,
+                      intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+                      intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+                      intel_uncore_read(uncore, GEN6_RP_UP_EI));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Hold the GPU at the maximum frequency while @engine stays awake but
+ * idle, wait out a DOWN evaluation interval, and verify the frequency
+ * is unchanged (the RPS worker is stubbed out by the caller) and that a
+ * DOWN-threshold or idle-timeout interrupt was latched into pm_iir.
+ */
+static int __rps_down_interrupt(struct intel_rps *rps,
+                               struct intel_engine_cs *engine)
+{
+       struct intel_uncore *uncore = engine->uncore;
+       u32 timeout;
+
+       rps_set_check(rps, rps->max_freq);
+
+       if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+               pr_err("%s: RPS did not register DOWN interrupt\n",
+                      engine->name);
+               return -EINVAL;
+       }
+
+       if (rps->last_freq != rps->max_freq) {
+               pr_err("%s: RPS did not program max frequency\n",
+                      engine->name);
+               return -EINVAL;
+       }
+
+       /* Convert the HW EI register value into a sleep time in us */
+       timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+       timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+       timeout = DIV_ROUND_UP(timeout, 1000);
+
+       sleep_for_ei(rps, timeout);
+
+       if (rps->cur_freq != rps->max_freq) {
+               pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+                      engine->name,
+                      intel_rps_read_actual_frequency(rps));
+               return -EINVAL;
+       }
+
+       if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+               pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+                      engine->name, rps->pm_iir,
+                      intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+                      intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+                      intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+                      intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+                      intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+                      intel_uncore_read(uncore, GEN6_RP_UP_EI));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * live_rps_interrupt - check that the HW generates RPS threshold
+ * interrupts: UP while an engine is kept busy with a spinner at minimum
+ * frequency, and DOWN while the engine is held awake but idle at
+ * maximum frequency (with rc6 disabled so the GT stays awake).
+ */
+int live_rps_interrupt(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       u32 pm_events;
+       int err = 0;
+
+       /*
+        * First, let's check whether or not we are receiving interrupts.
+        */
+
+       if (!intel_rps_has_interrupts(rps))
+               return 0;
+
+       /* Snapshot the PM events enabled while the GT is awake */
+       intel_gt_pm_get(gt);
+       pm_events = rps->pm_events;
+       intel_gt_pm_put(gt);
+       if (!pm_events) {
+               pr_err("No RPS PM events registered, but RPS is enabled?\n");
+               return -ENODEV;
+       }
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       /* Stub out the RPS worker so interrupts are latched, not acted on */
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       for_each_engine(engine, gt, id) {
+               /* Keep the engine busy with a spinner; expect an UP! */
+               if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+                       unsigned long saved_heartbeat;
+
+                       intel_gt_pm_wait_for_idle(engine->gt);
+                       GEM_BUG_ON(intel_rps_is_active(rps));
+
+                       saved_heartbeat = engine_heartbeat_disable(engine);
+
+                       err = __rps_up_interrupt(rps, engine, &spin);
+
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       if (err)
+                               goto out;
+
+                       intel_gt_pm_wait_for_idle(engine->gt);
+               }
+
+               /* Keep the engine awake but idle and check for DOWN */
+               if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+                       unsigned long saved_heartbeat;
+
+                       saved_heartbeat = engine_heartbeat_disable(engine);
+                       intel_rc6_disable(&gt->rc6);
+
+                       err = __rps_down_interrupt(rps, engine);
+
+                       intel_rc6_enable(&gt->rc6);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       if (err)
+                               goto out;
+               }
+       }
+
+out:
+       if (igt_flush_test(gt->i915))
+               err = -EIO;
+
+       igt_spinner_fini(&spin);
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       return err;
+}
+
+/*
+ * Measure the change in RAPL energy (microjoules) over roughly
+ * @duration_ms and return microjoules per millisecond, i.e. milliwatts.
+ */
+static u64 __measure_power(int duration_ms)
+{
+       u64 dE, dt;
+
+       dt = ktime_get();
+       dE = librapl_energy_uJ();
+       usleep_range(1000 * duration_ms, 2000 * duration_ms);
+       dE = librapl_energy_uJ() - dE;
+       dt = ktime_get() - dt;
+
+       /* dt is in ns; scale so the result is uJ per ms (== mW) */
+       return div64_u64(1000 * 1000 * dE, dt);
+}
+
+/*
+ * Pin the GPU to *freq, take 5 power samples (mW) and return a
+ * triangle-filtered average for stability. *freq is updated to the
+ * mean of the request and the actual frequency (CAGF) observed.
+ */
+static u64 measure_power_at(struct intel_rps *rps, int *freq)
+{
+       u64 x[5];
+       int i;
+
+       *freq = rps_set_check(rps, *freq);
+       for (i = 0; i < 5; i++)
+               x[i] = __measure_power(5);
+       *freq = (*freq + read_cagf(rps)) / 2;
+
+       /* A simple triangle filter for better result stability */
+       sort(x, 5, sizeof(*x), cmp_u64, NULL);
+       return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+/*
+ * live_rps_power - verify that running a spinner at the minimum
+ * frequency consumes measurably less power (via RAPL) than running the
+ * same spinner at the maximum frequency.
+ */
+int live_rps_power(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       void (*saved_work)(struct work_struct *wrk);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+
+       /*
+        * Our fundamental assumption is that running at lower frequency
+        * actually saves power. Let's see if our RAPL measurement support
+        * that theory.
+        */
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       /* Skip quietly when no RAPL energy readings are available */
+       if (!librapl_energy_uJ())
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       intel_gt_pm_wait_for_idle(gt);
+       saved_work = rps->work.func;
+       rps->work.func = dummy_rps_work;
+
+       for_each_engine(engine, gt, id) {
+               unsigned long saved_heartbeat;
+               struct i915_request *rq;
+               struct {
+                       u64 power;
+                       int freq;
+               } min, max;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               saved_heartbeat = engine_heartbeat_disable(engine);
+
+               rq = igt_spinner_create_request(&spin,
+                                               engine->kernel_context,
+                                               MI_NOOP);
+               if (IS_ERR(rq)) {
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_add(rq);
+
+               if (!igt_wait_for_spinner(&spin, rq)) {
+                       pr_err("%s: RPS spinner did not start\n",
+                              engine->name);
+                       igt_spinner_end(&spin);
+                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       intel_gt_set_wedged(engine->gt);
+                       err = -EIO;
+                       break;
+               }
+
+               max.freq = rps->max_freq;
+               max.power = measure_power_at(rps, &max.freq);
+
+               min.freq = rps->min_freq;
+               min.power = measure_power_at(rps, &min.freq);
+
+               igt_spinner_end(&spin);
+               engine_heartbeat_enable(engine, saved_heartbeat);
+
+               pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
+                       engine->name,
+                       min.power, intel_gpu_freq(rps, min.freq),
+                       max.power, intel_gpu_freq(rps, max.freq));
+
+               /* Need a sizeable frequency spread for a meaningful result */
+               if (10 * min.freq >= 9 * max.freq) {
+                       pr_notice("Could not control frequency, ran at [%d:%uMHz, %d:%uMhz]\n",
+                                 min.freq, intel_gpu_freq(rps, min.freq),
+                                 max.freq, intel_gpu_freq(rps, max.freq));
+                       continue;
+               }
+
+               /* Expect at least ~10% power saving at the lower frequency */
+               if (11 * min.power > 10 * max.power) {
+                       pr_err("%s: did not conserve power when setting lower frequency!\n",
+                              engine->name);
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (igt_flush_test(gt->i915)) {
+                       err = -EIO;
+                       break;
+               }
+       }
+
+       igt_spinner_fini(&spin);
+
+       intel_gt_pm_wait_for_idle(gt);
+       rps->work.func = saved_work;
+
+       return err;
+}
+
+/*
+ * live_rps_dynamic - check that dynamic reclocking works end to end:
+ * the real RPS worker (not stubbed here) should upclock towards max
+ * while a spinner keeps an engine busy, and downclock back towards min
+ * once the engine idles.
+ *
+ * Fix: the failure message had a misplaced newline ("failed\n!").
+ */
+int live_rps_dynamic(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_rps *rps = &gt->rps;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+
+       /*
+        * We've looked at the basics, and have established that we
+        * can change the clock frequency and that the HW will generate
+        * interrupts based on load. Now we check how we integrate those
+        * moving parts into dynamic reclocking based on load.
+        */
+
+       if (!intel_rps_is_enabled(rps))
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       for_each_engine(engine, gt, id) {
+               struct i915_request *rq;
+               struct {
+                       ktime_t dt;
+                       u8 freq;
+               } min, max;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               /* Start each pass from idle at the minimum frequency */
+               intel_gt_pm_wait_for_idle(gt);
+               GEM_BUG_ON(intel_rps_is_active(rps));
+               rps->cur_freq = rps->min_freq;
+
+               /* Keep the GT awake (no rc6) so reclocking is observable */
+               intel_engine_pm_get(engine);
+               intel_rc6_disable(&gt->rc6);
+               GEM_BUG_ON(rps->last_freq != rps->min_freq);
+
+               rq = igt_spinner_create_request(&spin,
+                                               engine->kernel_context,
+                                               MI_NOOP);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto err;
+               }
+
+               i915_request_add(rq);
+
+               max.dt = ktime_get();
+               max.freq = wait_for_freq(rps, rps->max_freq, 500);
+               max.dt = ktime_sub(ktime_get(), max.dt);
+
+               igt_spinner_end(&spin);
+
+               min.dt = ktime_get();
+               min.freq = wait_for_freq(rps, rps->min_freq, 2000);
+               min.dt = ktime_sub(ktime_get(), min.dt);
+
+               pr_info("%s: dynamically reclocked to %u:%uMHz while busy in %lluns, and %u:%uMHz while idle in %lluns\n",
+                       engine->name,
+                       max.freq, intel_gpu_freq(rps, max.freq),
+                       ktime_to_ns(max.dt),
+                       min.freq, intel_gpu_freq(rps, min.freq),
+                       ktime_to_ns(min.dt));
+               if (min.freq >= max.freq) {
+                       pr_err("%s: dynamic reclocking of spinner failed!\n",
+                              engine->name);
+                       err = -EINVAL;
+               }
+
+err:
+               intel_rc6_enable(&gt->rc6);
+               intel_engine_pm_put(engine);
+
+               if (igt_flush_test(gt->i915))
+                       err = -EIO;
+               if (err)
+                       break;
+       }
+
+       igt_spinner_fini(&spin);
+
+       return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h
new file mode 100644 (file)
index 0000000..6e82a63
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+/*
+ * Entry points for the live RPS (GT reclocking) selftests;
+ * @arg is the struct intel_gt under test.
+ */
+int live_rps_control(void *arg);
+int live_rps_clock_interval(void *arg);
+int live_rps_frequency_cs(void *arg);
+int live_rps_frequency_srm(void *arg);
+int live_rps_power(void *arg);
+int live_rps_interrupt(void *arg);
+int live_rps_dynamic(void *arg);
+
+#endif /* SELFTEST_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
new file mode 100644 (file)
index 0000000..43c7acb
--- /dev/null
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+
+#include "gem/i915_gem_object.h"
+#include "shmem_utils.h"
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len)
+{
+       struct file *file;
+       int err;
+
+       file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
+       if (IS_ERR(file))
+               return file;
+
+       err = shmem_write(file, 0, data, len);
+       if (err) {
+               fput(file);
+               return ERR_PTR(err);
+       }
+
+       return file;
+}
+
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
+{
+       struct file *file;
+       void *ptr;
+
+       if (obj->ops == &i915_gem_shmem_ops) {
+               file = obj->base.filp;
+               atomic_long_inc(&file->f_count);
+               return file;
+       }
+
+       ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(ptr))
+               return ERR_CAST(ptr);
+
+       file = shmem_create_from_data("", ptr, obj->base.size);
+       i915_gem_object_unpin_map(obj);
+
+       return file;
+}
+
+static size_t shmem_npte(struct file *file)
+{
+       return file->f_mapping->host->i_size >> PAGE_SHIFT;
+}
+
+static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
+{
+       unsigned long pfn;
+
+       vunmap(ptr);
+
+       for (pfn = 0; pfn < n_pte; pfn++) {
+               struct page *page;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (!WARN_ON(IS_ERR(page))) {
+                       put_page(page);
+                       put_page(page);
+               }
+       }
+}
+
+void *shmem_pin_map(struct file *file)
+{
+       const size_t n_pte = shmem_npte(file);
+       pte_t *stack[32], **ptes, **mem;
+       struct vm_struct *area;
+       unsigned long pfn;
+
+       mem = stack;
+       if (n_pte > ARRAY_SIZE(stack)) {
+               mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+               if (!mem)
+                       return NULL;
+       }
+
+       area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
+       if (!area) {
+               if (mem != stack)
+                       kvfree(mem);
+               return NULL;
+       }
+
+       ptes = mem;
+       for (pfn = 0; pfn < n_pte; pfn++) {
+               struct page *page;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (IS_ERR(page))
+                       goto err_page;
+
+               **ptes++ = mk_pte(page, PAGE_KERNEL);
+       }
+
+       if (mem != stack)
+               kvfree(mem);
+
+       mapping_set_unevictable(file->f_mapping);
+       return area->addr;
+
+err_page:
+       if (mem != stack)
+               kvfree(mem);
+
+       __shmem_unpin_map(file, area->addr, pfn);
+       return NULL;
+}
+
+void shmem_unpin_map(struct file *file, void *ptr)
+{
+       mapping_clear_unevictable(file->f_mapping);
+       __shmem_unpin_map(file, ptr, shmem_npte(file));
+}
+
+static int __shmem_rw(struct file *file, loff_t off,
+                     void *ptr, size_t len,
+                     bool write)
+{
+       unsigned long pfn;
+
+       for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+               unsigned int this =
+                       min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+               struct page *page;
+               void *vaddr;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               vaddr = kmap(page);
+               if (write)
+                       memcpy(vaddr + offset_in_page(off), ptr, this);
+               else
+                       memcpy(ptr, vaddr + offset_in_page(off), this);
+               kunmap(page);
+               put_page(page);
+
+               len -= this;
+               ptr += this;
+               off = 0;
+       }
+
+       return 0;
+}
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
+{
+       return __shmem_rw(file, off, dst, len, false);
+}
+
+int shmem_write(struct file *file, loff_t off, void *src, size_t len)
+{
+       return __shmem_rw(file, off, src, len, true);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_shmem_utils.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.h b/drivers/gpu/drm/i915/gt/shmem_utils.h
new file mode 100644 (file)
index 0000000..c166917
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SHMEM_UTILS_H
+#define SHMEM_UTILS_H
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct file;
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len);
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);
+
+void *shmem_pin_map(struct file *file);
+void shmem_unpin_map(struct file *file, void *ptr);
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
+int shmem_write(struct file *file, loff_t off, void *src, size_t len);
+
+#endif /* SHMEM_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gt/st_shmem_utils.c b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
new file mode 100644 (file)
index 0000000..b279fe8
--- /dev/null
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/* Just a quick and casual check of the shmem_utils API */
+
+static int igt_shmem_basic(void *ignored)
+{
+       u32 datum = 0xdeadbeef, result;
+       struct file *file;
+       u32 *map;
+       int err;
+
+       file = shmem_create_from_data("mock", &datum, sizeof(datum));
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       result = 0;
+       err = shmem_read(file, 0, &result, sizeof(result));
+       if (err)
+               goto out_file;
+
+       if (result != datum) {
+               pr_err("Incorrect read back from shmemfs: %x != %x\n",
+                      result, datum);
+               err = -EINVAL;
+               goto out_file;
+       }
+
+       result = 0xc0ffee;
+       err = shmem_write(file, 0, &result, sizeof(result));
+       if (err)
+               goto out_file;
+
+       map = shmem_pin_map(file);
+       if (!map) {
+               err = -ENOMEM;
+               goto out_file;
+       }
+
+       if (*map != result) {
+               pr_err("Incorrect read back via mmap of last write: %x != %x\n",
+                      *map, result);
+               err = -EINVAL;
+               goto out_map;
+       }
+
+out_map:
+       shmem_unpin_map(file, map);
+out_file:
+       fput(file);
+       return err;
+}
+
+int shmem_utils_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_shmem_basic),
+       };
+
+       return i915_subtests(tests, NULL);
+}
index 8f9b2f33dbaf5fdc9617fff003b2d2b16fdea853..535cc1169e54cc79c5e78a692543292b22164b82 100644 (file)
@@ -191,6 +191,17 @@ max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 static struct kobj_attribute max_spin_attr =
 __ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
 
+static ssize_t
+max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+       return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_def =
+__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
+
 static ssize_t
 timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
@@ -233,6 +244,17 @@ timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 static struct kobj_attribute timeslice_duration_attr =
 __ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
 
+static ssize_t
+timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+       return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_def =
+__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
+
 static ssize_t
 stop_store(struct kobject *kobj, struct kobj_attribute *attr,
           const char *buf, size_t count)
@@ -272,6 +294,17 @@ stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 static struct kobj_attribute stop_timeout_attr =
 __ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
 
+static ssize_t
+stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+       return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_def =
+__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
+
 static ssize_t
 preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
                      const char *buf, size_t count)
@@ -316,6 +349,18 @@ preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
 static struct kobj_attribute preempt_timeout_attr =
 __ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
 
+static ssize_t
+preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
+                       char *buf)
+{
+       struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+       return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_def =
+__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
+
 static ssize_t
 heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
@@ -359,6 +404,17 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 static struct kobj_attribute heartbeat_interval_attr =
 __ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
 
+static ssize_t
+heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+       struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+       return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_def =
+__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
+
 static void kobj_engine_release(struct kobject *kobj)
 {
        kfree(kobj);
@@ -390,6 +446,42 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
        return &ke->base;
 }
 
+static void add_defaults(struct kobj_engine *parent)
+{
+       static const struct attribute *files[] = {
+               &max_spin_def.attr,
+               &stop_timeout_def.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+               &heartbeat_interval_def.attr,
+#endif
+               NULL
+       };
+       struct kobj_engine *ke;
+
+       ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+       if (!ke)
+               return;
+
+       kobject_init(&ke->base, &kobj_engine_type);
+       ke->engine = parent->engine;
+
+       if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
+               kobject_put(&ke->base);
+               return;
+       }
+
+       if (sysfs_create_files(&ke->base, files))
+               return;
+
+       if (intel_engine_has_timeslices(ke->engine) &&
+           sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
+               return;
+
+       if (intel_engine_has_preempt_reset(ke->engine) &&
+           sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
+               return;
+}
+
 void intel_engines_add_sysfs(struct drm_i915_private *i915)
 {
        static const struct attribute *files[] = {
@@ -433,6 +525,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
                    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
                        goto err_engine;
 
+               add_defaults(container_of(kobj, struct kobj_engine, base));
+
                if (0) {
 err_object:
                        kobject_put(kobj);
index 819f09ef51fc214e4b491458b2c8e12fd9c2a5d8..861657897c0f99f038e305a237cab592c3bebb85 100644 (file)
@@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc)
 {
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 
-       intel_guc_fw_init_early(guc);
+       intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);
        intel_guc_submission_init_early(guc);
@@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 
        return 0;
 }
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct intel_uncore *uncore = gt->uncore;
+       intel_wakeref_t wakeref;
+
+       if (!intel_guc_is_supported(guc)) {
+               drm_printf(p, "GuC not supported\n");
+               return;
+       }
+
+       if (!intel_guc_is_wanted(guc)) {
+               drm_printf(p, "GuC disabled\n");
+               return;
+       }
+
+       intel_uc_fw_dump(&guc->fw, p);
+
+       with_intel_runtime_pm(uncore->rpm, wakeref) {
+               u32 status = intel_uncore_read(uncore, GUC_STATUS);
+               u32 i;
+
+               drm_printf(p, "\nGuC status 0x%08x:\n", status);
+               drm_printf(p, "\tBootrom status = 0x%x\n",
+                          (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+               drm_printf(p, "\tuKernel status = 0x%x\n",
+                          (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+               drm_printf(p, "\tMIA Core status = 0x%x\n",
+                          (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+               drm_puts(p, "\nScratch registers:\n");
+               for (i = 0; i < 16; i++) {
+                       drm_printf(p, "\t%2d: \t0x%x\n",
+                                  i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+               }
+       }
+}
index 4594ccbeaa34216042854b221db8bfe2618496af..e84ab67b317d76aff9cbb676bcf948493b4a2510 100644 (file)
@@ -74,6 +74,11 @@ struct intel_guc {
        struct mutex send_mutex;
 };
 
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+       return container_of(log, struct intel_guc, log);
+}
+
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
@@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
 int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine);
 
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
new file mode 100644 (file)
index 0000000..fe7cb7b
--- /dev/null
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+       struct intel_guc *guc = m->private;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       if (!intel_guc_is_supported(guc))
+               return -ENODEV;
+
+       intel_guc_load_status(guc, &p);
+       drm_puts(&p, "\n");
+       intel_guc_log_info(&guc->log, &p);
+
+       /* Add more as required ... */
+
+       return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+       static const struct debugfs_gt_file files[] = {
+               { "guc_info", &guc_info_fops, NULL },
+       };
+
+       if (!intel_guc_is_supported(guc))
+               return;
+
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+       intel_guc_log_debugfs_register(&guc->log, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
new file mode 100644 (file)
index 0000000..424c266
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
index 3a1c47d600ea3611c8ebd3e6ce42048f04227de3..d4a87f4c9421f2fab2eafd831913536907c49eb5 100644 (file)
 #include "intel_guc_fw.h"
 #include "i915_drv.h"
 
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
-       struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
-       intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
-                              INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
 static void guc_prepare_xfer(struct intel_uncore *uncore)
 {
        u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
index b5ab639d7259d56f34a5883ddd9de09daaa9d19d..0b4d2a9c94359859196ed09b5c8fe73d4643a582 100644 (file)
@@ -8,7 +8,6 @@
 
 struct intel_guc;
 
-void intel_guc_fw_init_early(struct intel_guc *guc);
 int intel_guc_fw_upload(struct intel_guc *guc);
 
 #endif
index caed0d57e704059d2d5b24b9114694b6ab686d9e..fb10f3597ea59f34b6c6fb72bf58deb003b66b69 100644 (file)
@@ -55,11 +55,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
        return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
-{
-       return container_of(log, struct intel_guc, log);
-}
-
 static void guc_log_enable_flush_events(struct intel_guc_log *log)
 {
        intel_guc_enable_msg(log_to_guc(log),
@@ -672,3 +667,95 @@ void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
 {
        queue_work(system_highpri_wq, &log->relay.flush_work);
 }
+
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+       switch (type) {
+       case GUC_ISR_LOG_BUFFER:
+               return "ISR";
+       case GUC_DPC_LOG_BUFFER:
+               return "DPC";
+       case GUC_CRASH_DUMP_LOG_BUFFER:
+               return "CRASH";
+       default:
+               MISSING_CASE(type);
+       }
+
+       return "";
+}
+
+/**
+ * intel_guc_log_info - dump information about GuC log relay
+ * @log: the GuC log
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC log info
+ */
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
+{
+       enum guc_log_buffer_type type;
+
+       if (!intel_guc_log_relay_created(log)) {
+               drm_puts(p, "GuC log relay not created\n");
+               return;
+       }
+
+       drm_puts(p, "GuC logging stats:\n");
+
+       drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
+
+       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+               drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
+                          stringify_guc_log_type(type),
+                          log->stats[type].flush,
+                          log->stats[type].sampled_overflow);
+       }
+}
+
+/**
+ * intel_guc_log_dump - dump the contents of the GuC log
+ * @log: the GuC log
+ * @p: the &drm_printer
+ * @dump_load_err: dump the log saved on GuC load error
+ *
+ * Pretty printer for the GuC log
+ */
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+                      bool dump_load_err)
+{
+       struct intel_guc *guc = log_to_guc(log);
+       struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+       struct drm_i915_gem_object *obj = NULL;
+       u32 *map;
+       int i = 0;
+
+       if (!intel_guc_is_supported(guc))
+               return -ENODEV;
+
+       if (dump_load_err)
+               obj = uc->load_err_log;
+       else if (guc->log.vma)
+               obj = guc->log.vma->obj;
+
+       if (!obj)
+               return 0;
+
+       map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+       if (IS_ERR(map)) {
+               DRM_DEBUG("Failed to pin object\n");
+               drm_puts(p, "(log data inaccessible)\n");
+               return PTR_ERR(map);
+       }
+
+       for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+               drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          *(map + i), *(map + i + 1),
+                          *(map + i + 2), *(map + i + 3));
+
+       drm_puts(p, "\n");
+
+       i915_gem_object_unpin_map(obj);
+
+       return 0;
+}
index c252c022c5fcba34a4c751a84515f994181f3b17..11fccd0b2294f6263f93ff1e6f6e495a95856f90 100644 (file)
@@ -79,4 +79,8 @@ static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
        return log->level;
 }
 
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+                      bool dump_load_err);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
new file mode 100644 (file)
index 0000000..129e0cf
--- /dev/null
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_log.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_log_dump_show(struct seq_file *m, void *data)
+{
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       return intel_guc_log_dump(m->private, &p, false);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+
+static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
+{
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       return intel_guc_log_dump(m->private, &p, true);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+
+static int guc_log_level_get(void *data, u64 *val)
+{
+       struct intel_guc_log *log = data;
+
+       if (!intel_guc_is_used(log_to_guc(log)))
+               return -ENODEV;
+
+       *val = intel_guc_log_get_level(log);
+
+       return 0;
+}
+
+static int guc_log_level_set(void *data, u64 val)
+{
+       struct intel_guc_log *log = data;
+
+       if (!intel_guc_is_used(log_to_guc(log)))
+               return -ENODEV;
+
+       return intel_guc_log_set_level(log, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
+                       guc_log_level_get, guc_log_level_set,
+                       "%lld\n");
+
+static int guc_log_relay_open(struct inode *inode, struct file *file)
+{
+       struct intel_guc_log *log = inode->i_private;
+
+       if (!intel_guc_is_ready(log_to_guc(log)))
+               return -ENODEV;
+
+       file->private_data = log;
+
+       return intel_guc_log_relay_open(log);
+}
+
+static ssize_t
+guc_log_relay_write(struct file *filp,
+                   const char __user *ubuf,
+                   size_t cnt,
+                   loff_t *ppos)
+{
+       struct intel_guc_log *log = filp->private_data;
+       int val;
+       int ret;
+
+       ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Enable and start the guc log relay on value of 1.
+        * Flush log relay for any other value.
+        */
+       if (val == 1)
+               ret = intel_guc_log_relay_start(log);
+       else
+               intel_guc_log_relay_flush(log);
+
+       return ret ?: cnt;
+}
+
+static int guc_log_relay_release(struct inode *inode, struct file *file)
+{
+       struct intel_guc_log *log = inode->i_private;
+
+       intel_guc_log_relay_close(log);
+       return 0;
+}
+
+static const struct file_operations guc_log_relay_fops = {
+       .owner = THIS_MODULE,
+       .open = guc_log_relay_open,
+       .write = guc_log_relay_write,
+       .release = guc_log_relay_release,
+};
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+                                   struct dentry *root)
+{
+       static const struct debugfs_gt_file files[] = {
+               { "guc_log_dump", &guc_log_dump_fops, NULL },
+               { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
+               { "guc_log_level", &guc_log_level_fops, NULL },
+               { "guc_log_relay", &guc_log_relay_fops, NULL },
+       };
+
+       if (!intel_guc_is_supported(log_to_guc(log)))
+               return;
+
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
+}
+
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
new file mode 100644 (file)
index 0000000..e8900e3
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_LOG_H
+#define DEBUGFS_GUC_LOG_H
+
+struct intel_guc_log;
+struct dentry;
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+                                   struct dentry *root);
+
+#endif /* DEBUGFS_GUC_LOG_H */
index aa6d56e25a10ae3fa6b3a5d26a16e0d45bd69917..94eb63f309cefb954fc3bfc103d88f618201fda8 100644 (file)
@@ -258,7 +258,7 @@ static void guc_submit(struct intel_engine_cs *engine,
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-       return rq->sched.attr.priority | __NO_PREEMPTION;
+       return rq->sched.attr.priority;
 }
 
 static struct i915_request *schedule_in(struct i915_request *rq, int idx)
index a74b65694512f648c83a7d69a45a5c7561541d69..65eeb44b397d88e6acd8db0af09d7fd5f980998f 100644 (file)
@@ -41,7 +41,7 @@ void intel_huc_init_early(struct intel_huc *huc)
 {
        struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
 
-       intel_huc_fw_init_early(huc);
+       intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
 
        if (INTEL_GEN(i915) >= 11) {
                huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
@@ -200,9 +200,13 @@ fail:
  * This function reads status register to verify if HuC
  * firmware was successfully loaded.
  *
- * Returns: 1 if HuC firmware is loaded and verified,
- * 0 if HuC firmware is not loaded and -ENODEV if HuC
- * is not present on this platform.
+ * Returns:
+ *  * -ENODEV if HuC is not present on this platform,
+ *  * -EOPNOTSUPP if HuC firmware is disabled,
+ *  * -ENOPKG if HuC firmware was not installed,
+ *  * -ENOEXEC if HuC firmware is invalid or mismatched,
+ *  * 0 if HuC firmware is not running,
+ *  * 1 if HuC firmware is authenticated and running.
  */
 int intel_huc_check_status(struct intel_huc *huc)
 {
@@ -210,11 +214,50 @@ int intel_huc_check_status(struct intel_huc *huc)
        intel_wakeref_t wakeref;
        u32 status = 0;
 
-       if (!intel_huc_is_supported(huc))
+       switch (__intel_uc_fw_status(&huc->fw)) {
+       case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
                return -ENODEV;
+       case INTEL_UC_FIRMWARE_DISABLED:
+               return -EOPNOTSUPP;
+       case INTEL_UC_FIRMWARE_MISSING:
+               return -ENOPKG;
+       case INTEL_UC_FIRMWARE_ERROR:
+               return -ENOEXEC;
+       default:
+               break;
+       }
 
        with_intel_runtime_pm(gt->uncore->rpm, wakeref)
                status = intel_uncore_read(gt->uncore, huc->status.reg);
 
        return (status & huc->status.mask) == huc->status.value;
 }
+
+/**
+ * intel_huc_load_status - dump information about HuC load status
+ * @huc: the HuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for HuC load status.
+ */
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
+{
+       struct intel_gt *gt = huc_to_gt(huc);
+       intel_wakeref_t wakeref;
+
+       if (!intel_huc_is_supported(huc)) {
+               drm_printf(p, "HuC not supported\n");
+               return;
+       }
+
+       if (!intel_huc_is_wanted(huc)) {
+               drm_printf(p, "HuC disabled\n");
+               return;
+       }
+
+       intel_uc_fw_dump(&huc->fw, p);
+
+       with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+               drm_printf(p, "HuC status: 0x%08x\n",
+                          intel_uncore_read(gt->uncore, huc->status.reg));
+}
index a40b9cfc6c221262fd767296a200f0d4a068a71b..daee43b661d4cfedd78e6637cd22536bf0b658f9 100644 (file)
@@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
        return intel_uc_fw_is_running(&huc->fw);
 }
 
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
new file mode 100644 (file)
index 0000000..5733c15
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_huc.h"
+#include "intel_huc_debugfs.h"
+
+static int huc_info_show(struct seq_file *m, void *data)
+{
+       struct intel_huc *huc = m->private;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       if (!intel_huc_is_supported(huc))
+               return -ENODEV;
+
+       intel_huc_load_status(huc, &p);
+
+       return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
+{
+       static const struct debugfs_gt_file files[] = {
+               { "huc_info", &huc_info_fops, NULL },
+       };
+
+       if (!intel_huc_is_supported(huc))
+               return;
+
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
new file mode 100644 (file)
index 0000000..be79e99
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_HUC_H
+#define DEBUGFS_HUC_H
+
+struct intel_huc;
+struct dentry;
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root);
+
+#endif /* DEBUGFS_HUC_H */
index 9cdf4cbe691c96c248c630573c26f75a59bd862b..e5ef509c70e8944712a67431c1a21b0f084ae099 100644 (file)
@@ -7,23 +7,6 @@
 #include "intel_huc_fw.h"
 #include "i915_drv.h"
 
-/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
-       struct intel_gt *gt = huc_to_gt(huc);
-       struct intel_uc *uc = &gt->uc;
-       struct drm_i915_private *i915 = gt->i915;
-
-       intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
-                              intel_uc_wants_guc(uc),
-                              INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
 /**
  * intel_huc_fw_upload() - load HuC uCode to device
  * @huc: intel_huc structure
index b791269ce923eb96194add464b67434face0320c..12f264ee3e0b366773c631b842aa569107f5cfbe 100644 (file)
@@ -8,7 +8,6 @@
 
 struct intel_huc;
 
-void intel_huc_fw_init_early(struct intel_huc *huc);
 int intel_huc_fw_upload(struct intel_huc *huc);
 
 #endif
index a4cbe06e06bd2c95348d468c3ed0fc68970ac333..f518fe05c6f9a79396ba52aa217db45a2cde1b6e 100644 (file)
@@ -45,12 +45,12 @@ static void __confirm_options(struct intel_uc *uc)
 {
        struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
 
-       DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
-                            "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
-                            i915_modparams.enable_guc,
-                            yesno(intel_uc_wants_guc(uc)),
-                            yesno(intel_uc_wants_guc_submission(uc)),
-                            yesno(intel_uc_wants_huc(uc)));
+       drm_dbg(&i915->drm,
+               "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+               i915_modparams.enable_guc,
+               yesno(intel_uc_wants_guc(uc)),
+               yesno(intel_uc_wants_guc_submission(uc)),
+               yesno(intel_uc_wants_huc(uc)));
 
        if (i915_modparams.enable_guc == -1)
                return;
@@ -63,25 +63,25 @@ static void __confirm_options(struct intel_uc *uc)
        }
 
        if (!intel_uc_supports_guc(uc))
-               dev_info(i915->drm.dev,
+               drm_info(&i915->drm,
                         "Incompatible option enable_guc=%d - %s\n",
                         i915_modparams.enable_guc, "GuC is not supported!");
 
        if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
            !intel_uc_supports_huc(uc))
-               dev_info(i915->drm.dev,
+               drm_info(&i915->drm,
                         "Incompatible option enable_guc=%d - %s\n",
                         i915_modparams.enable_guc, "HuC is not supported!");
 
        if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
            !intel_uc_supports_guc_submission(uc))
-               dev_info(i915->drm.dev,
+               drm_info(&i915->drm,
                         "Incompatible option enable_guc=%d - %s\n",
                         i915_modparams.enable_guc, "GuC submission is N/A");
 
        if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
                                          ENABLE_GUC_LOAD_HUC))
-               dev_info(i915->drm.dev,
+               drm_info(&i915->drm,
                         "Incompatible option enable_guc=%d - %s\n",
                         i915_modparams.enable_guc, "undocumented flag");
 }
@@ -131,6 +131,13 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
                i915_gem_object_put(log);
 }
 
+void intel_uc_driver_remove(struct intel_uc *uc)
+{
+       intel_uc_fini_hw(uc);
+       intel_uc_fini(uc);
+       __uc_free_load_err_log(uc);
+}
+
 static inline bool guc_communication_enabled(struct intel_guc *guc)
 {
        return intel_guc_ct_enabled(&guc->ct);
@@ -311,8 +318,6 @@ static void __uc_fini(struct intel_uc *uc)
 {
        intel_huc_fini(&uc->huc);
        intel_guc_fini(&uc->guc);
-
-       __uc_free_load_err_log(uc);
 }
 
 static int __uc_sanitize(struct intel_uc *uc)
@@ -475,14 +480,14 @@ static int __uc_init_hw(struct intel_uc *uc)
        if (intel_uc_uses_guc_submission(uc))
                intel_guc_submission_enable(guc);
 
-       dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+       drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
                 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
                 guc->fw.major_ver_found, guc->fw.minor_ver_found,
                 "submission",
                 enableddisabled(intel_uc_uses_guc_submission(uc)));
 
        if (intel_uc_uses_huc(uc)) {
-               dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+               drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
                         intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
                         huc->fw.path,
                         huc->fw.major_ver_found, huc->fw.minor_ver_found,
@@ -503,7 +508,7 @@ err_out:
        __uc_sanitize(uc);
 
        if (!ret) {
-               dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+               drm_notice(&i915->drm, "GuC is uninitialized\n");
                /* We want to run without GuC submission */
                return 0;
        }
index 5ae7b50b7dc1dfc0dbcd027c5d78b8ae1706bf3b..9c954c589edf2d727ff6698f32e88d347a6e2e3a 100644 (file)
@@ -34,6 +34,7 @@ struct intel_uc {
 
 void intel_uc_init_early(struct intel_uc *uc);
 void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_driver_remove(struct intel_uc *uc);
 void intel_uc_init_mmio(struct intel_uc *uc);
 void intel_uc_reset_prepare(struct intel_uc *uc);
 void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
new file mode 100644 (file)
index 0000000..9d16b78
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "intel_guc_debugfs.h"
+#include "intel_huc_debugfs.h"
+#include "intel_uc.h"
+#include "intel_uc_debugfs.h"
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
+{
+       struct dentry *root;
+
+       if (!gt_root)
+               return;
+
+       /* GuC and HuC go always in pair, no need to check both */
+       if (!intel_uc_supports_guc(uc))
+               return;
+
+       root = debugfs_create_dir("uc", gt_root);
+       if (IS_ERR(root))
+               return;
+
+       intel_guc_debugfs_register(&uc->guc, root);
+       intel_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
new file mode 100644 (file)
index 0000000..010ce25
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_UC_H
+#define DEBUGFS_UC_H
+
+struct intel_uc;
+struct dentry;
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root);
+
+#endif /* DEBUGFS_UC_H */
index 18c7552036885495aec72be41ced7da2d730c6f5..e1caae93996d5e1486e2d6275903fba361ea82c7 100644 (file)
 #include "intel_uc_fw_abi.h"
 #include "i915_drv.h"
 
-static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+static inline struct intel_gt *
+____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
 {
-       GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
-       if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+       if (type == INTEL_UC_FW_TYPE_GUC)
                return container_of(uc_fw, struct intel_gt, uc.guc.fw);
 
-       GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+       GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
        return container_of(uc_fw, struct intel_gt, uc.huc.fw);
 }
 
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+       GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+       return ____uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
 #ifdef CONFIG_DRM_I915_DEBUG_GUC
 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
                               enum intel_uc_fw_status status)
 {
        uc_fw->__status =  status;
-       DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
-                            "%s firmware -> %s\n",
-                            intel_uc_fw_type_repr(uc_fw->type),
-                            status == INTEL_UC_FIRMWARE_SELECTED ?
-                            uc_fw->path : intel_uc_fw_status_repr(status));
+       drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
+               "%s firmware -> %s\n",
+               intel_uc_fw_type_repr(uc_fw->type),
+               status == INTEL_UC_FIRMWARE_SELECTED ?
+               uc_fw->path : intel_uc_fw_status_repr(status));
 }
 #endif
 
@@ -187,17 +193,15 @@ static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
  * intel_uc_fw_init_early - initialize the uC object and select the firmware
  * @uc_fw: uC firmware
  * @type: type of uC
- * @supported: is uC support possible
- * @platform: platform identifier
- * @rev: hardware revision
  *
  * Initialize the state of our uC object and relevant tracking and select the
  * firmware to fetch and load.
  */
 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
-                           enum intel_uc_fw_type type, bool supported,
-                           enum intel_platform platform, u8 rev)
+                           enum intel_uc_fw_type type)
 {
+       struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+
        /*
         * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
         * before we're looked at the HW caps to see if we have uc support
@@ -208,8 +212,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
 
        uc_fw->type = type;
 
-       if (supported) {
-               __uc_fw_auto_select(uc_fw, platform, rev);
+       if (HAS_GT_UC(i915)) {
+               __uc_fw_auto_select(uc_fw,
+                                   INTEL_INFO(i915)->platform,
+                                   INTEL_REVID(i915));
                __uc_fw_user_override(uc_fw);
        }
 
@@ -290,7 +296,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 
        /* Check the size of the blob before examining buffer contents */
        if (unlikely(fw->size < sizeof(struct uc_css_header))) {
-               dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+               drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                         fw->size, sizeof(struct uc_css_header));
                err = -ENODATA;
@@ -303,7 +309,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
                css->exponent_size_dw) * sizeof(u32);
        if (unlikely(size != sizeof(struct uc_css_header))) {
-               dev_warn(dev,
+               drm_warn(&i915->drm,
                         "%s firmware %s: unexpected header size: %zu != %zu\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                         fw->size, sizeof(struct uc_css_header));
@@ -316,7 +322,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 
        /* now RSA */
        if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
-               dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+               drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                         css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
                err = -EPROTO;
@@ -327,7 +333,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        /* At least, it should have header, uCode and RSA. Size of all three. */
        size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
        if (unlikely(fw->size < size)) {
-               dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+               drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                         fw->size, size);
                err = -ENOEXEC;
@@ -337,7 +343,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        /* Sanity check whether this fw is not larger than whole WOPCM memory */
        size = __intel_uc_fw_get_upload_size(uc_fw);
        if (unlikely(size >= i915->wopcm.size)) {
-               dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+               drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                         size, (size_t)i915->wopcm.size);
                err = -E2BIG;
@@ -352,7 +358,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 
        if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
            uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
-               dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+               drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
                           intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
                           uc_fw->major_ver_found, uc_fw->minor_ver_found,
                           uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
@@ -380,9 +386,9 @@ fail:
                                  INTEL_UC_FIRMWARE_MISSING :
                                  INTEL_UC_FIRMWARE_ERROR);
 
-       dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+       drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
                   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-       dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+       drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
                 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
 
        release_firmware(fw);           /* OK even if fw is NULL */
@@ -467,7 +473,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
        /* Wait for DMA to finish */
        ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
        if (ret)
-               dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+               drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
                        intel_uc_fw_type_repr(uc_fw->type),
                        intel_uncore_read_fw(uncore, DMA_CTRL));
 
index 888ff0de0244b5af4347b9d9ddaf3155d1905657..23d3a423ac0fd4d3d140a29d8fe51e40f0875f6e 100644 (file)
@@ -239,8 +239,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
 }
 
 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
-                           enum intel_uc_fw_type type, bool supported,
-                           enum intel_platform platform, u8 rev);
+                           enum intel_uc_fw_type type);
 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
index 8b13f091cee20b528695716e8631a8386424b049..0d6d5987130821926002fbbf9d1f85bf43196788 100644 (file)
@@ -35,7 +35,7 @@
  */
 
 #include "i915_drv.h"
-#include "i915_gem_fence_reg.h"
+#include "gt/intel_ggtt_fencing.h"
 #include "gvt.h"
 
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
index a3cc080a46c694d4c053ac49dd70fdc7616dd0df..8b87f130f7f165c086f36ba304024649d86f79e9 100644 (file)
@@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
        return 0;
 }
 
+static int is_cmd_update_pdps(unsigned int offset,
+                             struct parser_exec_state *s)
+{
+       u32 base = s->workload->engine->mmio_base;
+       return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+                                      unsigned int offset, unsigned int index)
+{
+       struct intel_vgpu *vgpu = s->vgpu;
+       struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+       struct intel_vgpu_mm *mm;
+       u64 pdps[GEN8_3LVL_PDPES];
+
+       if (shadow_mm->ppgtt_mm.root_entry_type ==
+           GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+               pdps[0] = (u64)cmd_val(s, 2) << 32;
+               pdps[0] |= cmd_val(s, 4);
+
+               mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+               if (!mm) {
+                       gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+                       return -EINVAL;
+               }
+               intel_vgpu_mm_get(mm);
+               list_add_tail(&mm->ppgtt_mm.link,
+                             &s->workload->lri_shadow_mm);
+               *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+               *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+       } else {
+               /* Currently all guests use PML4 table and now can't
+                * have a guest with 3-level table but uses LRI for
+                * PPGTT update. So this is simply un-testable. */
+               GEM_BUG_ON(1);
+               gvt_vgpu_err("invalid shared shadow vm type\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
        unsigned int offset, unsigned int index, char *cmd)
 {
@@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }
 
+       if (is_cmd_update_pdps(offset, s) &&
+           cmd_pdp_mmio_update_handler(s, offset, index))
+               return -EINVAL;
+
        /* TODO
         * In order to let workload with inhibit context to generate
         * correct image data into memory, vregs values will be loaded to
index dd25c3024370edcb1e52354233dbc8d95cce789f..158873f269b1b24d6cd31804d5f36355cb29bf9b 100644 (file)
@@ -424,8 +424,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 
        ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
 out:
-       intel_vgpu_unpin_mm(workload->shadow_mm);
-       intel_vgpu_destroy_workload(workload);
        return ret;
 }
 
index 2a4b23f8aa740e7b6a17d2650509e8ae34456aa2..210016192ce704036608a04d05ceaa79a7548d09 100644 (file)
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 
        INIT_LIST_HEAD(&mm->ppgtt_mm.list);
        INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+       INIT_LIST_HEAD(&mm->ppgtt_mm.link);
 
        if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
                mm->ppgtt_mm.guest_pdps[0] = pdps[0];
@@ -2341,12 +2342,27 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 {
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct intel_engine_cs *engine;
+       int i;
 
        if (bytes != 4 && bytes != 8)
                return -EINVAL;
 
        off -= info->gtt_start_offset;
        ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
+
+       /* if ggtt of last submitted context is written,
+        * that context is probably got unpinned.
+        * Set last shadowed ctx to invalid.
+        */
+       for_each_engine(engine, vgpu->gvt->gt, i) {
+               if (!s->last_ctx[i].valid)
+                       continue;
+
+               if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
+                       s->last_ctx[i].valid = false;
+       }
        return ret;
 }
 
index 88789316807d31ceff670073f077ffe910558fd0..320b8d6ad92fa4e0f7ba6bd0f1d34c0435cfc44c 100644 (file)
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {
 
                        struct list_head list;
                        struct list_head lru_list;
+                       struct list_head link; /* possible LRI shadow mm list */
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
index 9e1787867894dd39951304578a34d0d39cde1d09..c7c5612378832602944be4db2942336759ec549f 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/types.h>
-#include <xen/xen.h>
 #include <linux/kthread.h>
 
 #include "i915_drv.h"
index 58c2c7932e3f3903a1669fb4ca6e9c759979e52d..a4a6db6b7f9087c96be422b73a2e9f2f542ca6db 100644 (file)
@@ -163,6 +163,11 @@ struct intel_vgpu_submission {
        const struct intel_vgpu_submission_ops *ops;
        int virtual_submission_interface;
        bool active;
+       struct {
+               u32 lrca;
+               bool valid;
+               u64 ring_context_gpa;
+       } last_ctx[I915_NUM_ENGINES];
 };
 
 struct intel_vgpu {
index 2faf50e1b051124208cf72d75d458f402acf5421..3e88e3b5c43ad47707bb0c160cdd45f138744009 100644 (file)
@@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GAMTARBMODE, D_BDW_PLUS);
 
 #define RING_REG(base) _MMIO((base) + 0x270)
-       MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+       MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
        MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
index b17c4a1599cd57872191084a9fa7d9c3140d326b..b79da5124f831cc4f4ad843545b9b031d0643529 100644 (file)
@@ -79,6 +79,4 @@ struct intel_gvt_mpt {
        bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
-extern struct intel_gvt_mpt xengt_mpt;
-
 #endif /* _GVT_HYPERCALL_H_ */
index e92ed96c9b239422776f18b7132f818fa04162fd..0fb1df71c637c088ad8292275ed6388dd7f85f6c 100644 (file)
@@ -58,10 +58,8 @@ static void set_context_pdp_root_pointer(
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-       struct drm_i915_gem_object *ctx_obj =
-               workload->req->context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
-       struct page *page;
+       struct intel_context *ctx = workload->req->context;
 
        if (WARN_ON(!workload->shadow_mm))
                return;
@@ -69,11 +67,9 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
        if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
                return;
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap(page);
+       shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
        set_context_pdp_root_pointer(shadow_ring_context,
                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
-       kunmap(page);
 }
 
 /*
@@ -128,16 +124,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
-       struct drm_i915_gem_object *ctx_obj =
-               workload->req->context->state->obj;
+       struct intel_context *ctx = workload->req->context;
        struct execlist_ring_context *shadow_ring_context;
-       struct page *page;
        void *dst;
+       void *context_base;
        unsigned long context_gpa, context_page_num;
+       unsigned long gpa_base; /* first gpa of consecutive GPAs */
+       unsigned long gpa_size; /* size of consecutive GPAs */
+       struct intel_vgpu_submission *s = &vgpu->submission;
        int i;
+       bool skip = false;
+       int ring_id = workload->engine->id;
+
+       GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+       context_base = (void *) ctx->lrc_reg_state -
+                               (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap(page);
+       shadow_ring_context = (void *) ctx->lrc_reg_state;
 
        sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
@@ -169,23 +173,43 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
-       kunmap(page);
 
-       if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
-               return 0;
+       gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
+                       workload->engine->name, workload->ctx_desc.lrca,
+                       workload->ctx_desc.context_id,
+                       workload->ring_context_gpa);
 
-       gvt_dbg_sched("ring %s workload lrca %x",
-                     workload->engine->name,
-                     workload->ctx_desc.lrca);
+       /* only need to ensure this context is not pinned/unpinned during the
+        * period from last submission to this this submission.
+        * Upon reaching this function, the currently submitted context is not
+        * supposed to get unpinned. If a misbehaving guest driver ever does
+        * this, it would corrupt itself.
+        */
+       if (s->last_ctx[ring_id].valid &&
+                       (s->last_ctx[ring_id].lrca ==
+                               workload->ctx_desc.lrca) &&
+                       (s->last_ctx[ring_id].ring_context_gpa ==
+                               workload->ring_context_gpa))
+               skip = true;
+
+       s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
+       s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
 
+       if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
+               return 0;
+
+       s->last_ctx[ring_id].valid = false;
        context_page_num = workload->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
 
        if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
                context_page_num = 19;
 
-       i = 2;
-       while (i < context_page_num) {
+       /* find consecutive GPAs from gma until the first inconsecutive GPA.
+        * read from the continuous GPAs into dst virtual address
+        */
+       gpa_size = 0;
+       for (i = 2; i < context_page_num; i++) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
@@ -194,13 +218,26 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                        return -EFAULT;
                }
 
-               page = i915_gem_object_get_page(ctx_obj, i);
-               dst = kmap(page);
-               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-                               I915_GTT_PAGE_SIZE);
-               kunmap(page);
-               i++;
+               if (gpa_size == 0) {
+                       gpa_base = context_gpa;
+                       dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+               } else if (context_gpa != gpa_base + gpa_size)
+                       goto read;
+
+               gpa_size += I915_GTT_PAGE_SIZE;
+
+               if (i == context_page_num - 1)
+                       goto read;
+
+               continue;
+
+read:
+               intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+               gpa_base = context_gpa;
+               gpa_size = I915_GTT_PAGE_SIZE;
+               dst = context_base + (i << I915_GTT_PAGE_SHIFT);
        }
+       s->last_ctx[ring_id].valid = true;
        return 0;
 }
 
@@ -599,10 +636,9 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);
 
-                       if (bb->vma && !IS_ERR(bb->vma)) {
+                       if (bb->vma && !IS_ERR(bb->vma))
                                i915_vma_unpin(bb->vma);
-                               i915_vma_close(bb->vma);
-                       }
+
                        i915_gem_object_put(bb->obj);
                }
                list_del(&bb->list);
@@ -610,10 +646,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
        }
 }
 
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct intel_vgpu_mm *m;
        int ret = 0;
 
        ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -628,6 +665,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                return -EINVAL;
        }
 
+       if (!list_empty(&workload->lri_shadow_mm)) {
+               list_for_each_entry(m, &workload->lri_shadow_mm,
+                                   ppgtt_mm.link) {
+                       ret = intel_vgpu_pin_mm(m);
+                       if (ret) {
+                               list_for_each_entry_from_reverse(m,
+                                                                &workload->lri_shadow_mm,
+                                                                ppgtt_mm.link)
+                                       intel_vgpu_unpin_mm(m);
+                               gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
+                               break;
+                       }
+               }
+       }
+
+       if (ret)
+               intel_vgpu_unpin_mm(workload->shadow_mm);
+
+       return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+       struct intel_vgpu_mm *m;
+
+       if (!list_empty(&workload->lri_shadow_mm)) {
+               list_for_each_entry(m, &workload->lri_shadow_mm,
+                                   ppgtt_mm.link)
+                       intel_vgpu_unpin_mm(m);
+       }
+       intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       int ret = 0;
+
+       ret = intel_vgpu_shadow_mm_pin(workload);
+       if (ret) {
+               gvt_vgpu_err("fail to pin shadow mm\n");
+               return ret;
+       }
+
        update_shadow_pdps(workload);
 
        set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
@@ -674,7 +757,7 @@ err_shadow_wa_ctx:
 err_shadow_batch:
        release_shadow_batch_buffer(workload);
 err_unpin_mm:
-       intel_vgpu_unpin_mm(workload->shadow_mm);
+       intel_vgpu_shadow_mm_unpin(workload);
        return ret;
 }
 
@@ -784,15 +867,48 @@ out:
        return workload;
 }
 
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+                             u64 ring_context_gpa, u32 pdp[8])
+{
+       u64 gpa;
+       int i;
+
+       gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+       for (i = 0; i < 8; i++)
+               intel_gvt_hypervisor_write_gpa(vgpu,
+                               gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static __maybe_unused bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+       if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+               u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+               if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+                       gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+                       return false;
+               }
+               return true;
+       } else {
+               /* see comment in LRI handler in cmd_parser.c */
+               gvt_dbg_mm("invalid shadow mm type\n");
+               return false;
+       }
+}
+
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
        struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
-       struct page *page;
+       struct intel_context *ctx = workload->req->context;
+       void *context_base;
        void *src;
        unsigned long context_gpa, context_page_num;
+       unsigned long gpa_base; /* first gpa of consecutive GPAs */
+       unsigned long gpa_size; /* size of consecutive GPAs*/
        int i;
        u32 ring_base;
        u32 head, tail;
@@ -801,6 +917,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
        gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
                      workload->ctx_desc.lrca);
 
+       GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
        head = workload->rb_head;
        tail = workload->rb_tail;
        wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -824,9 +942,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
        if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
                context_page_num = 19;
 
-       i = 2;
+       context_base = (void *) ctx->lrc_reg_state -
+                       (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
-       while (i < context_page_num) {
+       /* find consecutive GPAs from gma until the first inconsecutive GPA.
+        * write to the consecutive GPAs from src virtual address
+        */
+       gpa_size = 0;
+       for (i = 2; i < context_page_num; i++) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        I915_GTT_PAGE_SHIFT));
@@ -835,19 +958,39 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                        return;
                }
 
-               page = i915_gem_object_get_page(ctx_obj, i);
-               src = kmap(page);
-               intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
-                               I915_GTT_PAGE_SIZE);
-               kunmap(page);
-               i++;
+               if (gpa_size == 0) {
+                       gpa_base = context_gpa;
+                       src = context_base + (i << I915_GTT_PAGE_SHIFT);
+               } else if (context_gpa != gpa_base + gpa_size)
+                       goto write;
+
+               gpa_size += I915_GTT_PAGE_SIZE;
+
+               if (i == context_page_num - 1)
+                       goto write;
+
+               continue;
+
+write:
+               intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+               gpa_base = context_gpa;
+               gpa_size = I915_GTT_PAGE_SIZE;
+               src = context_base + (i << I915_GTT_PAGE_SHIFT);
        }
 
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap(page);
+       shadow_ring_context = (void *) ctx->lrc_reg_state;
+
+       if (!list_empty(&workload->lri_shadow_mm)) {
+               struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+                                                         struct intel_vgpu_mm,
+                                                         ppgtt_mm.link);
+               GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+               update_guest_pdps(vgpu, workload->ring_context_gpa,
+                                 (void *)m->ppgtt_mm.guest_pdps);
+       }
 
 #define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -864,8 +1007,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
-       kunmap(page);
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
@@ -959,6 +1100,9 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        workload->complete(workload);
 
+       intel_vgpu_shadow_mm_unpin(workload);
+       intel_vgpu_destroy_workload(workload);
+
        atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
 
@@ -1264,6 +1408,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
+       memset(s->last_ctx, 0, sizeof(s->last_ctx));
+
        i915_vm_put(&ppgtt->vm);
        return 0;
 
@@ -1350,6 +1496,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);
 
+       if (!list_empty(&workload->lri_shadow_mm)) {
+               struct intel_vgpu_mm *m, *mm;
+               list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+                                        ppgtt_mm.link) {
+                       list_del(&m->ppgtt_mm.link);
+                       intel_vgpu_mm_put(m);
+               }
+       }
+
+       GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
        if (workload->shadow_mm)
                intel_vgpu_mm_put(workload->shadow_mm);
 
@@ -1368,6 +1524,7 @@ alloc_workload(struct intel_vgpu *vgpu)
 
        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);
+       INIT_LIST_HEAD(&workload->lri_shadow_mm);
 
        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);
index bf7fc0ca4cb15fb3e605d5a2d457d0ff6c3c3384..15d317f2a4a4afb52f604c2b146bd39bfb9b5fa6 100644 (file)
@@ -87,6 +87,7 @@ struct intel_vgpu_workload {
        int status;
 
        struct intel_vgpu_mm *shadow_mm;
+       struct list_head lri_shadow_mm; /* For PPGTT load cmd */
 
        /* different submission model may need different handler */
        int (*prepare)(struct intel_vgpu_workload *);
index c4048628188a259f9967392991a190baac34c795..d960d0be5bd2fb62cae1261f35b51c8174cc76ef 100644 (file)
@@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref)
        return err;
 }
 
-int i915_active_wait(struct i915_active *ref)
+int __i915_active_wait(struct i915_active *ref, int state)
 {
        int err;
 
@@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref)
        if (err)
                return err;
 
-       if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+       if (!i915_active_is_idle(ref) &&
+           ___wait_var_event(ref, i915_active_is_idle(ref),
+                             state, 0, 0, schedule()))
                return -EINTR;
 
        flush_work(&ref->work);
@@ -540,34 +542,88 @@ static int __await_active(struct i915_active_fence *active,
        return 0;
 }
 
+struct wait_barrier {
+       struct wait_queue_entry base;
+       struct i915_active *ref;
+};
+
+static int
+barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
+{
+       struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
+
+       if (i915_active_is_idle(wb->ref)) {
+               list_del(&wq->entry);
+               i915_sw_fence_complete(wq->private);
+               kfree(wq);
+       }
+
+       return 0;
+}
+
+static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
+{
+       struct wait_barrier *wb;
+
+       wb = kmalloc(sizeof(*wb), GFP_KERNEL);
+       if (unlikely(!wb))
+               return -ENOMEM;
+
+       GEM_BUG_ON(i915_active_is_idle(ref));
+       if (!i915_sw_fence_await(fence)) {
+               kfree(wb);
+               return -EINVAL;
+       }
+
+       wb->base.flags = 0;
+       wb->base.func = barrier_wake;
+       wb->base.private = fence;
+       wb->ref = ref;
+
+       add_wait_queue(__var_waitqueue(ref), &wb->base);
+       return 0;
+}
+
 static int await_active(struct i915_active *ref,
                        unsigned int flags,
                        int (*fn)(void *arg, struct dma_fence *fence),
-                       void *arg)
+                       void *arg, struct i915_sw_fence *barrier)
 {
        int err = 0;
 
-       /* We must always wait for the exclusive fence! */
-       if (rcu_access_pointer(ref->excl.fence)) {
+       if (!i915_active_acquire_if_busy(ref))
+               return 0;
+
+       if (flags & I915_ACTIVE_AWAIT_EXCL &&
+           rcu_access_pointer(ref->excl.fence)) {
                err = __await_active(&ref->excl, fn, arg);
                if (err)
-                       return err;
+                       goto out;
        }
 
-       if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+       if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
                struct active_node *it, *n;
 
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        err = __await_active(&it->base, fn, arg);
                        if (err)
-                               break;
+                               goto out;
                }
-               i915_active_release(ref);
+       }
+
+       if (flags & I915_ACTIVE_AWAIT_BARRIER) {
+               err = flush_lazy_signals(ref);
                if (err)
-                       return err;
+                       goto out;
+
+               err = __await_barrier(ref, barrier);
+               if (err)
+                       goto out;
        }
 
-       return 0;
+out:
+       i915_active_release(ref);
+       return err;
 }
 
 static int rq_await_fence(void *arg, struct dma_fence *fence)
@@ -579,7 +635,7 @@ int i915_request_await_active(struct i915_request *rq,
                              struct i915_active *ref,
                              unsigned int flags)
 {
-       return await_active(ref, flags, rq_await_fence, rq);
+       return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
 }
 
 static int sw_await_fence(void *arg, struct dma_fence *fence)
@@ -592,7 +648,7 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
                               struct i915_active *ref,
                               unsigned int flags)
 {
-       return await_active(ref, flags, sw_await_fence, fence);
+       return await_active(ref, flags, sw_await_fence, fence, fence);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -818,7 +874,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 
                GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
                llist_add(barrier_to_ll(node), &engine->barrier_tasks);
-               intel_engine_pm_put(engine);
+               intel_engine_pm_put_delay(engine, 1);
        }
 }
 
@@ -937,6 +993,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
        active_fence_cb(fence, cb);
 }
 
+struct auto_active {
+       struct i915_active base;
+       struct kref ref;
+};
+
+struct i915_active *i915_active_get(struct i915_active *ref)
+{
+       struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+       kref_get(&aa->ref);
+       return &aa->base;
+}
+
+static void auto_release(struct kref *ref)
+{
+       struct auto_active *aa = container_of(ref, typeof(*aa), ref);
+
+       i915_active_fini(&aa->base);
+       kfree(aa);
+}
+
+void i915_active_put(struct i915_active *ref)
+{
+       struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+       kref_put(&aa->ref, auto_release);
+}
+
+static int auto_active(struct i915_active *ref)
+{
+       i915_active_get(ref);
+       return 0;
+}
+
+static void auto_retire(struct i915_active *ref)
+{
+       i915_active_put(ref);
+}
+
+struct i915_active *i915_active_create(void)
+{
+       struct auto_active *aa;
+
+       aa = kmalloc(sizeof(*aa), GFP_KERNEL);
+       if (!aa)
+               return NULL;
+
+       kref_init(&aa->ref);
+       i915_active_init(&aa->base, auto_active, auto_retire);
+
+       return &aa->base;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_active.c"
 #endif
index b3282ae7913c1db60c0941a000bdef0b756715f1..cf40581509667d5f3c44ee22f2cf8728fc37a4a0 100644 (file)
@@ -181,7 +181,11 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
        return rcu_access_pointer(ref->excl.fence);
 }
 
-int i915_active_wait(struct i915_active *ref);
+int __i915_active_wait(struct i915_active *ref, int state);
+static inline int i915_active_wait(struct i915_active *ref)
+{
+       return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
+}
 
 int i915_sw_fence_await_active(struct i915_sw_fence *fence,
                               struct i915_active *ref,
@@ -189,7 +193,9 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
 int i915_request_await_active(struct i915_request *rq,
                              struct i915_active *ref,
                              unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
+#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
@@ -221,4 +227,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
 void i915_active_print(struct i915_active *ref, struct drm_printer *m);
 void i915_active_unlock_wait(struct i915_active *ref);
 
+struct i915_active *i915_active_create(void);
+struct i915_active *i915_active_get(struct i915_active *ref);
+void i915_active_put(struct i915_active *ref);
+
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
new file mode 100644 (file)
index 0000000..b79b5f6
--- /dev/null
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+unsigned long
+i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
+{
+       if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT))
+               return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT);
+
+       return 0;
+}
index 6ca797128aa16ee547323ba1c3f91a73e406a230..bca036ac662129414d38168f17fd85db68b684e8 100644 (file)
 #include <drm/drm_debugfs.h>
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
+#include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
 #include "gt/intel_rc6.h"
 #include "gt/intel_rps.h"
-#include "gt/uc/intel_guc_submission.h"
 
 #include "i915_debugfs.h"
 #include "i915_debugfs_params.h"
@@ -218,7 +219,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
-       u64 total, unbound;
+       u64 total;
        u64 active, inactive;
        u64 closed;
 };
@@ -234,8 +235,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 
        stats->count++;
        stats->total += obj->base.size;
-       if (!atomic_read(&obj->bind_count))
-               stats->unbound += obj->base.size;
 
        spin_lock(&obj->vma.lock);
        if (!stats->vm) {
@@ -285,13 +284,12 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
        if (stats.count) \
-               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
-                          stats.unbound, \
                           stats.closed); \
 } while (0)
 
@@ -745,7 +743,7 @@ i915_error_state_write(struct file *filp,
        if (!error)
                return 0;
 
-       DRM_DEBUG_DRIVER("Resetting error state\n");
+       drm_dbg(&error->i915->drm, "Resetting error state\n");
        i915_reset_error_state(error->i915);
 
        return cnt;
@@ -930,21 +928,30 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
-               seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
-                          rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
-               seq_printf(m, "RP CUR UP: %d (%dus)\n",
-                          rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
-               seq_printf(m, "RP PREV UP: %d (%dus)\n",
-                          rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+               seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+                          rpupei,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
+               seq_printf(m, "RP CUR UP: %d (%dns)\n",
+                          rpcurup,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
+               seq_printf(m, "RP PREV UP: %d (%dns)\n",
+                          rpprevup,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);
 
-               seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
-                          rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
-               seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
-                          rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
-               seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
-                          rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+               seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+                          rpdownei,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt,
+                                                     rpdownei));
+               seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+                          rpcurdown,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt,
+                                                     rpcurdown));
+               seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+                          rpprevdown,
+                          intel_gt_pm_interval_to_ns(&dev_priv->gt,
+                                                     rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);
 
@@ -1193,7 +1200,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt.rps;
 
-       seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+       seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+       seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
        seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
@@ -1213,7 +1221,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 
        seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
 
-       if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+       if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;
 
@@ -1251,286 +1259,6 @@ static int i915_llc(struct seq_file *m, void *data)
        return 0;
 }
 
-static int i915_huc_load_status_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       intel_wakeref_t wakeref;
-       struct drm_printer p;
-
-       if (!HAS_GT_UC(dev_priv))
-               return -ENODEV;
-
-       p = drm_seq_file_printer(m);
-       intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
-
-       with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
-               seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-
-       return 0;
-}
-
-static int i915_guc_load_status_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       intel_wakeref_t wakeref;
-       struct drm_printer p;
-
-       if (!HAS_GT_UC(dev_priv))
-               return -ENODEV;
-
-       p = drm_seq_file_printer(m);
-       intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
-
-       with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-               u32 tmp = I915_READ(GUC_STATUS);
-               u32 i;
-
-               seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
-               seq_printf(m, "\tBootrom status = 0x%x\n",
-                          (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
-               seq_printf(m, "\tuKernel status = 0x%x\n",
-                          (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
-               seq_printf(m, "\tMIA Core status = 0x%x\n",
-                          (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
-               seq_puts(m, "\nScratch registers:\n");
-               for (i = 0; i < 16; i++) {
-                       seq_printf(m, "\t%2d: \t0x%x\n",
-                                  i, I915_READ(SOFT_SCRATCH(i)));
-               }
-       }
-
-       return 0;
-}
-
-static const char *
-stringify_guc_log_type(enum guc_log_buffer_type type)
-{
-       switch (type) {
-       case GUC_ISR_LOG_BUFFER:
-               return "ISR";
-       case GUC_DPC_LOG_BUFFER:
-               return "DPC";
-       case GUC_CRASH_DUMP_LOG_BUFFER:
-               return "CRASH";
-       default:
-               MISSING_CASE(type);
-       }
-
-       return "";
-}
-
-static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
-{
-       enum guc_log_buffer_type type;
-
-       if (!intel_guc_log_relay_created(log)) {
-               seq_puts(m, "GuC log relay not created\n");
-               return;
-       }
-
-       seq_puts(m, "GuC logging stats:\n");
-
-       seq_printf(m, "\tRelay full count: %u\n",
-                  log->relay.full_count);
-
-       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
-               seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
-                          stringify_guc_log_type(type),
-                          log->stats[type].flush,
-                          log->stats[type].sampled_overflow);
-       }
-}
-
-static int i915_guc_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct intel_uc *uc = &dev_priv->gt.uc;
-
-       if (!intel_uc_uses_guc(uc))
-               return -ENODEV;
-
-       i915_guc_log_info(m, &uc->guc.log);
-
-       /* Add more as required ... */
-
-       return 0;
-}
-
-static int i915_guc_stage_pool(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct intel_uc *uc = &dev_priv->gt.uc;
-       struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
-       int index;
-
-       if (!intel_uc_uses_guc_submission(uc))
-               return -ENODEV;
-
-       for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
-               struct intel_engine_cs *engine;
-
-               if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
-                       continue;
-
-               seq_printf(m, "GuC stage descriptor %u:\n", index);
-               seq_printf(m, "\tIndex: %u\n", desc->stage_id);
-               seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
-               seq_printf(m, "\tPriority: %d\n", desc->priority);
-               seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
-               seq_printf(m, "\tEngines used: 0x%x\n",
-                          desc->engines_used);
-               seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
-                          desc->db_trigger_phy,
-                          desc->db_trigger_cpu,
-                          desc->db_trigger_uk);
-               seq_printf(m, "\tProcess descriptor: 0x%x\n",
-                          desc->process_desc);
-               seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
-                          desc->wq_addr, desc->wq_size);
-               seq_putc(m, '\n');
-
-               for_each_uabi_engine(engine, dev_priv) {
-                       u32 guc_engine_id = engine->guc_id;
-                       struct guc_execlist_context *lrc =
-                                               &desc->lrc[guc_engine_id];
-
-                       seq_printf(m, "\t%s LRC:\n", engine->name);
-                       seq_printf(m, "\t\tContext desc: 0x%x\n",
-                                  lrc->context_desc);
-                       seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
-                       seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
-                       seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
-                       seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
-                       seq_putc(m, '\n');
-               }
-       }
-
-       return 0;
-}
-
-static int i915_guc_log_dump(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = node_to_i915(node);
-       bool dump_load_err = !!node->info_ent->data;
-       struct drm_i915_gem_object *obj = NULL;
-       u32 *log;
-       int i = 0;
-
-       if (!HAS_GT_UC(dev_priv))
-               return -ENODEV;
-
-       if (dump_load_err)
-               obj = dev_priv->gt.uc.load_err_log;
-       else if (dev_priv->gt.uc.guc.log.vma)
-               obj = dev_priv->gt.uc.guc.log.vma->obj;
-
-       if (!obj)
-               return 0;
-
-       log = i915_gem_object_pin_map(obj, I915_MAP_WC);
-       if (IS_ERR(log)) {
-               DRM_DEBUG("Failed to pin object\n");
-               seq_puts(m, "(log data unaccessible)\n");
-               return PTR_ERR(log);
-       }
-
-       for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
-               seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-                          *(log + i), *(log + i + 1),
-                          *(log + i + 2), *(log + i + 3));
-
-       seq_putc(m, '\n');
-
-       i915_gem_object_unpin_map(obj);
-
-       return 0;
-}
-
-static int i915_guc_log_level_get(void *data, u64 *val)
-{
-       struct drm_i915_private *dev_priv = data;
-       struct intel_uc *uc = &dev_priv->gt.uc;
-
-       if (!intel_uc_uses_guc(uc))
-               return -ENODEV;
-
-       *val = intel_guc_log_get_level(&uc->guc.log);
-
-       return 0;
-}
-
-static int i915_guc_log_level_set(void *data, u64 val)
-{
-       struct drm_i915_private *dev_priv = data;
-       struct intel_uc *uc = &dev_priv->gt.uc;
-
-       if (!intel_uc_uses_guc(uc))
-               return -ENODEV;
-
-       return intel_guc_log_set_level(&uc->guc.log, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
-                       i915_guc_log_level_get, i915_guc_log_level_set,
-                       "%lld\n");
-
-static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
-{
-       struct drm_i915_private *i915 = inode->i_private;
-       struct intel_guc *guc = &i915->gt.uc.guc;
-       struct intel_guc_log *log = &guc->log;
-
-       if (!intel_guc_is_ready(guc))
-               return -ENODEV;
-
-       file->private_data = log;
-
-       return intel_guc_log_relay_open(log);
-}
-
-static ssize_t
-i915_guc_log_relay_write(struct file *filp,
-                        const char __user *ubuf,
-                        size_t cnt,
-                        loff_t *ppos)
-{
-       struct intel_guc_log *log = filp->private_data;
-       int val;
-       int ret;
-
-       ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
-       if (ret < 0)
-               return ret;
-
-       /*
-        * Enable and start the guc log relay on value of 1.
-        * Flush log relay for any other value.
-        */
-       if (val == 1)
-               ret = intel_guc_log_relay_start(log);
-       else
-               intel_guc_log_relay_flush(log);
-
-       return ret ?: cnt;
-}
-
-static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
-{
-       struct drm_i915_private *i915 = inode->i_private;
-       struct intel_guc *guc = &i915->gt.uc.guc;
-
-       intel_guc_log_relay_close(&guc->log);
-       return 0;
-}
-
-static const struct file_operations i915_guc_log_relay_fops = {
-       .owner = THIS_MODULE,
-       .open = i915_guc_log_relay_open,
-       .write = i915_guc_log_relay_write,
-       .release = i915_guc_log_relay_release,
-};
-
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1576,8 +1304,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
        seq_printf(m, "GT awake? %s [%d]\n",
                   yesno(dev_priv->gt.awake),
                   atomic_read(&dev_priv->gt.wakeref.count));
-       seq_printf(m, "CS timestamp frequency: %u kHz\n",
-                  RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+       seq_printf(m, "CS timestamp frequency: %u Hz\n",
+                  RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
 
        p = drm_seq_file_printer(m);
        for_each_uabi_engine(engine, dev_priv)
@@ -1676,13 +1404,12 @@ static int
 i915_perf_noa_delay_set(void *data, u64 val)
 {
        struct drm_i915_private *i915 = data;
-       const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
 
        /*
         * This would lead to infinite waits as we're doing timestamp
         * difference on the CS with only 32bits.
         */
-       if (val > mul_u32_u32(U32_MAX, clk))
+       if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
                return -EINVAL;
 
        atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1757,6 +1484,9 @@ gt_drop_caches(struct intel_gt *gt, u64 val)
        if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
                intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
 
+       if (val & DROP_FREED)
+               intel_gt_flush_buffer_pool(gt);
+
        return 0;
 }
 
@@ -2139,12 +1869,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
-       {"i915_guc_info", i915_guc_info, 0},
-       {"i915_guc_load_status", i915_guc_load_status_info, 0},
-       {"i915_guc_log_dump", i915_guc_log_dump, 0},
-       {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
-       {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
-       {"i915_huc_load_status", i915_huc_load_status_info, 0},
        {"i915_frequency_info", i915_frequency_info, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
        {"i915_context_status", i915_context_status, 0},
@@ -2172,11 +1896,9 @@ static const struct i915_debugfs_files {
        {"i915_error_state", &i915_error_state_fops},
        {"i915_gpu_info", &i915_gpu_info_fops},
 #endif
-       {"i915_guc_log_level", &i915_guc_log_level_fops},
-       {"i915_guc_log_relay", &i915_guc_log_relay_fops},
 };
 
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
        struct drm_minor *minor = dev_priv->drm.primary;
        int i;
@@ -2193,7 +1915,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
                                    i915_debugfs_files[i].fops);
        }
 
-       return drm_debugfs_create_files(i915_debugfs_list,
-                                       I915_DEBUGFS_ENTRIES,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(i915_debugfs_list,
+                                I915_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
 }
index 6da39c76ab5e7e687e83bbb4c3fbd453a2e12bbb..1de2736f124872e25cf269afa1913f8668f251ac 100644 (file)
@@ -12,10 +12,10 @@ struct drm_i915_private;
 struct seq_file;
 
 #ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
 #else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {}
 static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
 #endif
 
index 81a4621853dbd608ae95417060d82e2a822ee780..34ee12f3f02d465d4ad7703548080fb97ba8b3a2 100644 (file)
@@ -43,6 +43,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "display/intel_acpi.h"
@@ -227,14 +228,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
                ret = drm_vblank_init(&i915->drm,
                                      INTEL_NUM_PIPES(i915));
                if (ret)
-                       goto out;
+                       return ret;
        }
 
        intel_bios_init(i915);
 
        ret = intel_vga_register(i915);
        if (ret)
-               goto out;
+               goto cleanup_bios;
 
        intel_power_domains_init_hw(i915, false);
 
@@ -242,13 +243,16 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
 
        ret = intel_modeset_init_noirq(i915);
        if (ret)
-               goto cleanup_vga_client;
+               goto cleanup_vga_client_pw_domain_csr;
 
        return 0;
 
-cleanup_vga_client:
+cleanup_vga_client_pw_domain_csr:
+       intel_csr_ucode_fini(i915);
+       intel_power_domains_driver_remove(i915);
        intel_vga_unregister(i915);
-out:
+cleanup_bios:
+       intel_bios_driver_remove(i915);
        return ret;
 }
 
@@ -307,13 +311,13 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915)
 /* part #2: call after irq uninstall */
 static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
 {
-       intel_modeset_driver_remove_noirq(i915);
+       intel_csr_ucode_fini(i915);
 
-       intel_bios_driver_remove(i915);
+       intel_power_domains_driver_remove(i915);
 
        intel_vga_unregister(i915);
 
-       intel_csr_ucode_fini(i915);
+       intel_bios_driver_remove(i915);
 }
 
 static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -565,6 +569,62 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
        intel_gvt_sanitize_options(dev_priv);
 }
 
+/**
+ * i915_set_dma_info - set all relevant PCI dma info as configured for the
+ * platform
+ * @i915: valid i915 instance
+ *
+ * Set the dma max segment size, device and coherent masks.  The dma mask set
+ * needs to occur before i915_ggtt_probe_hw.
+ *
+ * A couple of platforms have special needs.  Address them as well.
+ *
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+       struct pci_dev *pdev = i915->drm.pdev;
+       unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+       int ret;
+
+       GEM_BUG_ON(!mask_size);
+
+       /*
+        * We don't have a max segment size, so set it to the max so sg's
+        * debugging layer doesn't complain
+        */
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+       ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+       if (ret)
+               goto mask_err;
+
+       /* overlay on gen2 is broken and can't address above 1G */
+       if (IS_GEN(i915, 2))
+               mask_size = 30;
+
+       /*
+        * 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_I965G(i915) || IS_I965GM(i915))
+               mask_size = 32;
+
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+       if (ret)
+               goto mask_err;
+
+       return 0;
+
+mask_err:
+       drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+       return ret;
+}
+
 /**
  * i915_driver_hw_probe - setup state requiring device access
  * @dev_priv: device private
@@ -610,6 +670,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
        /* needs to be done before ggtt probe */
        intel_dram_edram_detect(dev_priv);
 
+       ret = i915_set_dma_info(dev_priv);
+       if (ret)
+               return ret;
+
        i915_perf_init(dev_priv);
 
        ret = i915_ggtt_probe_hw(dev_priv);
@@ -638,40 +702,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
        pci_set_master(pdev);
 
-       /*
-        * We don't have a max segment size, so set it to the max so sg's
-        * debugging layer doesn't complain
-        */
-       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-
-       /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN(dev_priv, 2)) {
-               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
-               if (ret) {
-                       drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
-                       goto err_mem_regions;
-               }
-       }
-
-       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
-        * using 32bit addressing, overwriting memory if HWS is located
-        * above 4GB.
-        *
-        * The documentation also mentions an issue with undefined
-        * behaviour if any general state is accessed within a page above 4GB,
-        * which also needs to be handled carefully.
-        */
-       if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
-               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
-               if (ret) {
-                       drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
-                       goto err_mem_regions;
-               }
-       }
-
        cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
 
        intel_gt_init_workarounds(dev_priv);
@@ -876,17 +906,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
        struct drm_i915_private *i915;
-       int err;
-
-       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
-       if (!i915)
-               return ERR_PTR(-ENOMEM);
 
-       err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
-       if (err) {
-               kfree(i915);
-               return ERR_PTR(err);
-       }
+       i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+                                 struct drm_i915_private, drm);
+       if (IS_ERR(i915))
+               return i915;
 
        i915->drm.pdev = pdev;
        pci_set_drvdata(pdev, i915);
@@ -901,17 +925,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
        return i915;
 }
 
-static void i915_driver_destroy(struct drm_i915_private *i915)
-{
-       struct pci_dev *pdev = i915->drm.pdev;
-
-       drm_dev_fini(&i915->drm);
-       kfree(i915);
-
-       /* And make sure we never chase our dangling pointer from pci_dev */
-       pci_set_drvdata(pdev, NULL);
-}
-
 /**
  * i915_driver_probe - setup chip and create an initial config
  * @pdev: PCI device
@@ -993,12 +1006,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        i915_welcome_messages(i915);
 
+       i915->do_release = true;
+
        return 0;
 
 out_cleanup_irq:
        intel_irq_uninstall(i915);
 out_cleanup_modeset:
-       /* FIXME */
+       i915_driver_modeset_remove_noirq(i915);
 out_cleanup_hw:
        i915_driver_hw_remove(i915);
        intel_memory_regions_driver_release(i915);
@@ -1012,7 +1027,6 @@ out_pci_disable:
        pci_disable_device(pdev);
 out_fini:
        i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
-       i915_driver_destroy(i915);
        return ret;
 }
 
@@ -1035,12 +1049,12 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
        intel_irq_uninstall(i915);
 
-       i915_driver_modeset_remove_noirq(i915);
+       intel_modeset_driver_remove_noirq(i915);
 
        i915_reset_error_state(i915);
        i915_gem_driver_remove(i915);
 
-       intel_power_domains_driver_remove(i915);
+       i915_driver_modeset_remove_noirq(i915);
 
        i915_driver_hw_remove(i915);
 
@@ -1052,6 +1066,9 @@ static void i915_driver_release(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 
+       if (!dev_priv->do_release)
+               return;
+
        disable_rpm_wakeref_asserts(rpm);
 
        i915_gem_driver_release(dev_priv);
@@ -1065,7 +1082,6 @@ static void i915_driver_release(struct drm_device *dev)
        intel_runtime_pm_driver_release(rpm);
 
        i915_driver_late_release(dev_priv);
-       i915_driver_destroy(dev_priv);
 }
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1286,7 +1302,6 @@ static int i915_drm_resume(struct drm_device *dev)
                drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
 
        i915_ggtt_resume(&dev_priv->ggtt);
-       i915_gem_restore_fences(&dev_priv->ggtt);
 
        intel_csr_ucode_resume(dev_priv);
 
@@ -1604,8 +1619,6 @@ static int intel_runtime_suspend(struct device *kdev)
 
                intel_gt_runtime_resume(&dev_priv->gt);
 
-               i915_gem_restore_fences(&dev_priv->ggtt);
-
                enable_rpm_wakeref_asserts(rpm);
 
                return ret;
@@ -1685,7 +1698,6 @@ static int intel_runtime_resume(struct device *kdev)
         * we can do is to hope that things will still work (and disable RPM).
         */
        intel_gt_runtime_resume(&dev_priv->gt);
-       i915_gem_restore_fences(&dev_priv->ggtt);
 
        /*
         * On VLV/CHV display interrupts are part of the display
index 62b901ffabf9bd8087b3fe666072e4bc6271b253..adb9bf34cf97a3bb21690c8ec73bbeb382a0f7b3 100644 (file)
@@ -92,7 +92,6 @@
 #include "intel_wopcm.h"
 
 #include "i915_gem.h"
-#include "i915_gem_fence_reg.h"
 #include "i915_gem_gtt.h"
 #include "i915_gpu_error.h"
 #include "i915_perf_types.h"
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20200313"
-#define DRIVER_TIMESTAMP       1584144591
+#define DRIVER_DATE            "20200515"
+#define DRIVER_TIMESTAMP       1589543364
 
 struct drm_i915_gem_object;
 
@@ -149,6 +148,8 @@ enum hpd_pin {
 struct i915_hotplug {
        struct delayed_work hotplug_work;
 
+       const u32 *hpd, *pch_hpd;
+
        struct {
                unsigned long last_jiffies;
                int count;
@@ -417,6 +418,7 @@ struct intel_fbc {
                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
+                       u64 modifier;
                } fb;
                u16 gen9_wa_cfb_stride;
                s8 fence_id;
@@ -510,6 +512,7 @@ struct i915_psr {
        u32 dc3co_exit_delay;
        struct delayed_work dc3co_work;
        bool force_mode_changed;
+       struct drm_dp_vsc_sdp vsc;
 };
 
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -540,7 +543,6 @@ struct i915_suspend_saved_registers {
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
-       u64 saveFENCE[I915_MAX_NUM_FENCES];
        u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
 };
@@ -615,13 +617,14 @@ struct i915_gem_mm {
 
 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
 
-#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
-#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
-
-#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
-#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+                                        u64 context);
 
-#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+       return i915_fence_context_timeout(i915, U64_MAX);
+}
 
 /* Amount of SAGV/QGV points, BSpec precisely defines this */
 #define I915_NUM_QGV_POINTS 8
@@ -823,6 +826,9 @@ struct i915_selftest_stash {
 struct drm_i915_private {
        struct drm_device drm;
 
+       /* FIXME: Device release actions should all be moved to drmm_ */
+       bool do_release;
+
        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
@@ -885,7 +891,6 @@ struct drm_i915_private {
 
        struct pci_dev *bridge_dev;
 
-       struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct rb_root uabi_engines;
 
        struct resource mch_res;
@@ -1506,6 +1511,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_ICL_REVID(p, since, until) \
        (IS_ICELAKE(p) && IS_REVID(p, since, until))
 
+#define EHL_REVID_A0            0x0
+
+#define IS_EHL_REVID(p, since, until) \
+       (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
+
 #define TGL_REVID_A0           0x0
 #define TGL_REVID_B0           0x1
 #define TGL_REVID_C0           0x2
@@ -1606,7 +1616,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_DDI(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
 #define HAS_PSR(dev_priv)               (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_TRANSCODER_EDP(dev_priv)    (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
+#define HAS_TRANSCODER(dev_priv, trans)         ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
 
 #define HAS_RC6(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6)
 #define HAS_RC6p(dev_priv)              (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1740,6 +1750,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
@@ -1913,4 +1924,16 @@ i915_coherent_map_type(struct drm_i915_private *i915)
        return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
 }
 
+static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
+{
+       return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
+                               1000000000);
+}
+
+static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
+{
+       return div_u64(val * 1000000000,
+                      RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
+}
+
 #endif
index ca5420012a22bfb8f88433c15217df8592f5c0f8..0cbcb9f54e7d25c7e862b326182c0031a78182b6 100644 (file)
@@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
        struct i915_vma *vma;
        int ret;
 
-       if (!atomic_read(&obj->bind_count))
+       if (list_empty(&obj->vma.list))
                return 0;
 
        /*
@@ -141,6 +141,11 @@ try_again:
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;
 
+               if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+                       ret = -EBUSY;
+                       break;
+               }
+
                ret = -EAGAIN;
                if (!i915_vm_tryopen(vm))
                        break;
@@ -993,18 +998,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                        return ERR_PTR(ret);
        }
 
+       ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+       if (ret)
+               return ERR_PTR(ret);
+
        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
-               ret = i915_vma_revoke_fence(vma);
+               i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
-               if (ret)
-                       return ERR_PTR(ret);
        }
 
-       ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
-       if (ret)
-               return ERR_PTR(ret);
-
        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
@@ -1156,7 +1159,6 @@ err_unlock:
                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(&dev_priv->ggtt);
-               i915_gem_restore_fences(&dev_priv->ggtt);
                intel_init_clock_gating(dev_priv);
        }
 
index 02ad1acd117c4dea7d9ca371996de84aee89105d..6501939929d5d737f181e8b6edc6a6945467b4f6 100644 (file)
@@ -226,7 +226,12 @@ found:
 
        while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
                vma = container_of(node, struct i915_vma, node);
-               ret = __i915_vma_unbind(vma);
+
+               /* If we find any non-objects (!vma), we cannot evict them */
+               if (vma->node.color != I915_COLOR_UNEVICTABLE)
+                       ret = __i915_vma_unbind(vma);
+               else
+                       ret = -ENOSPC; /* XXX search failed, try again? */
        }
 
        return ret;
index 54fce81d5724a22ecba2945d21f11016ab9f29b1..d042644b9cd28dda0087c9c5ae9e9c258f256166 100644 (file)
@@ -153,7 +153,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
                        return -ENODEV;
                break;
        case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-               value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+               value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
                break;
        case I915_PARAM_MMAP_GTT_COHERENT:
                value = INTEL_INFO(i915)->has_coherent_ggtt;
index 5c8e51d2ba5b3e139581f14f8713b5f555bb64b7..eec292d06f1159dfe83f96760c348a0db2466520 100644 (file)
@@ -467,14 +467,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
        if (!erq->seqno)
                return;
 
-       err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
+       err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
                   prefix, erq->pid, erq->context, erq->seqno,
                   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                            &erq->flags) ? "!" : "",
                   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                            &erq->flags) ? "+" : "",
                   erq->sched_attr.priority,
-                  erq->start, erq->head, erq->tail);
+                  erq->head, erq->tail);
 }
 
 static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -1211,7 +1211,6 @@ static void record_request(const struct i915_request *request,
        erq->context = request->fence.context;
        erq->seqno = request->fence.seqno;
        erq->sched_attr = request->sched.attr;
-       erq->start = i915_ggtt_offset(request->ring->vma);
        erq->head = request->head;
        erq->tail = request->tail;
 
@@ -1321,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture,
        return capture;
 }
 
-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
-              struct drm_i915_gem_object *obj,
-              const char *name,
-              struct i915_vma_compress *compress)
-{
-       if (obj && i915_gem_object_has_pages(obj)) {
-               struct i915_vma fake = {
-                       .node = { .start = U64_MAX, .size = obj->base.size },
-                       .size = obj->base.size,
-                       .pages = obj->mm.pages,
-                       .obj = obj,
-               };
-
-               return i915_vma_coredump_create(gt, &fake, name, compress);
-       } else {
-               return NULL;
-       }
-}
-
 static void add_vma(struct intel_engine_coredump *ee,
                    struct i915_vma_coredump *vma)
 {
@@ -1429,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
                                         engine->wa_ctx.vma,
                                         "WA context",
                                         compress));
-
-       add_vma(ee,
-               capture_object(engine->gt,
-                              engine->default_state,
-                              "NULL context",
-                              compress));
 }
 
 static struct intel_engine_coredump *
@@ -1860,7 +1833,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
                return;
 
        i915 = error->i915;
-       dev_info(i915->drm.dev, "%s\n", error_msg(error));
+       drm_info(&i915->drm, "%s\n", error_msg(error));
 
        if (error->simulated ||
            cmpxchg(&i915->gpu_error.first_error, NULL, error))
index 0d1f6c8ff3556fa3d8923692741272045141358e..76b80fbfb7e96682d0c33d1243d8d3c5606a60d3 100644 (file)
@@ -42,7 +42,7 @@ struct i915_vma_coredump {
        int num_pages;
        int page_count;
        int unused;
-       u32 *pages[0];
+       u32 *pages[];
 };
 
 struct i915_request_coredump {
@@ -50,7 +50,6 @@ struct i915_request_coredump {
        pid_t pid;
        u32 context;
        u32 seqno;
-       u32 start;
        u32 head;
        u32 tail;
        struct i915_sched_attr sched_attr;
index 8a2b83807ffcddf9190d726bee60aea57413181c..4dc601dffc081f8bdba623d96de91f6d1ca19699 100644 (file)
@@ -124,7 +124,6 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
 };
 
-/* BXT hpd list */
 static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
@@ -168,6 +167,49 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
        [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
 };
 
+static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+{
+       struct i915_hotplug *hpd = &dev_priv->hotplug;
+
+       if (HAS_GMCH(dev_priv)) {
+               if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+                   IS_CHERRYVIEW(dev_priv))
+                       hpd->hpd = hpd_status_g4x;
+               else
+                       hpd->hpd = hpd_status_i915;
+               return;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 12)
+               hpd->hpd = hpd_gen12;
+       else if (INTEL_GEN(dev_priv) >= 11)
+               hpd->hpd = hpd_gen11;
+       else if (IS_GEN9_LP(dev_priv))
+               hpd->hpd = hpd_bxt;
+       else if (INTEL_GEN(dev_priv) >= 8)
+               hpd->hpd = hpd_bdw;
+       else if (INTEL_GEN(dev_priv) >= 7)
+               hpd->hpd = hpd_ivb;
+       else
+               hpd->hpd = hpd_ilk;
+
+       if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
+               return;
+
+       if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
+               hpd->pch_hpd = hpd_tgp;
+       else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+               hpd->pch_hpd = hpd_icp;
+       else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+               hpd->pch_hpd = hpd_spt;
+       else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+               hpd->pch_hpd = hpd_cpt;
+       else if (HAS_PCH_IBX(dev_priv))
+               hpd->pch_hpd = hpd_ibx;
+       else
+               MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+}
+
 static void
 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -1504,33 +1546,27 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                 u32 hotplug_status)
 {
        u32 pin_mask = 0, long_mask = 0;
+       u32 hotplug_trigger;
 
-       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-           IS_CHERRYVIEW(dev_priv)) {
-               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
-
-               if (hotplug_trigger) {
-                       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                          hotplug_trigger, hotplug_trigger,
-                                          hpd_status_g4x,
-                                          i9xx_port_hotplug_long_detect);
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+       else
+               hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
-                       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
-               }
+       if (hotplug_trigger) {
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                                  hotplug_trigger, hotplug_trigger,
+                                  dev_priv->hotplug.hpd,
+                                  i9xx_port_hotplug_long_detect);
 
-               if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-                       dp_aux_irq_handler(dev_priv);
-       } else {
-               u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
-               if (hotplug_trigger) {
-                       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                          hotplug_trigger, hotplug_trigger,
-                                          hpd_status_i915,
-                                          i9xx_port_hotplug_long_detect);
-                       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
-               }
+               intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
        }
+
+       if ((IS_G4X(dev_priv) ||
+            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+           hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+               dp_aux_irq_handler(dev_priv);
 }
 
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1696,8 +1732,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 }
 
 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
-                               u32 hotplug_trigger,
-                               const u32 hpd[HPD_NUM_PINS])
+                               u32 hotplug_trigger)
 {
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
@@ -1720,8 +1755,9 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
        if (!hotplug_trigger)
                return;
 
-       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
-                          dig_hotplug_reg, hpd,
+       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                          hotplug_trigger, dig_hotplug_reg,
+                          dev_priv->hotplug.pch_hpd,
                           pch_port_hotplug_long_detect);
 
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1732,7 +1768,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
        enum pipe pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
-       ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
+       ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1820,7 +1856,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
        enum pipe pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-       ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
+       ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -1857,22 +1893,18 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
        u32 ddi_hotplug_trigger, tc_hotplug_trigger;
        u32 pin_mask = 0, long_mask = 0;
        bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
-       const u32 *pins;
 
        if (HAS_PCH_TGP(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
                tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
-               pins = hpd_tgp;
        } else if (HAS_PCH_JSP(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
                tc_hotplug_trigger = 0;
-               pins = hpd_tgp;
        } else if (HAS_PCH_MCC(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
                tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
                tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
-               pins = hpd_icp;
        } else {
                drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
                         "Unrecognized PCH type 0x%x\n",
@@ -1881,7 +1913,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
                tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
-               pins = hpd_icp;
        }
 
        if (ddi_hotplug_trigger) {
@@ -1891,8 +1922,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
 
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                  ddi_hotplug_trigger,
-                                  dig_hotplug_reg, pins,
+                                  ddi_hotplug_trigger, dig_hotplug_reg,
+                                  dev_priv->hotplug.pch_hpd,
                                   icp_ddi_port_hotplug_long_detect);
        }
 
@@ -1903,8 +1934,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
 
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                  tc_hotplug_trigger,
-                                  dig_hotplug_reg, pins,
+                                  tc_hotplug_trigger, dig_hotplug_reg,
+                                  dev_priv->hotplug.pch_hpd,
                                   tc_port_hotplug_long_detect);
        }
 
@@ -1929,7 +1960,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                  hotplug_trigger, dig_hotplug_reg, hpd_spt,
+                                  hotplug_trigger, dig_hotplug_reg,
+                                  dev_priv->hotplug.pch_hpd,
                                   spt_port_hotplug_long_detect);
        }
 
@@ -1940,7 +1972,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
 
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
-                                  hotplug2_trigger, dig_hotplug_reg, hpd_spt,
+                                  hotplug2_trigger, dig_hotplug_reg,
+                                  dev_priv->hotplug.pch_hpd,
                                   spt_port_hotplug2_long_detect);
        }
 
@@ -1952,16 +1985,16 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 }
 
 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
-                               u32 hotplug_trigger,
-                               const u32 hpd[HPD_NUM_PINS])
+                               u32 hotplug_trigger)
 {
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
 
-       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
-                          dig_hotplug_reg, hpd,
+       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                          hotplug_trigger, dig_hotplug_reg,
+                          dev_priv->hotplug.hpd,
                           ilk_port_hotplug_long_detect);
 
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1974,7 +2007,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
        if (hotplug_trigger)
-               ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
+               ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
 
        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev_priv);
@@ -2020,7 +2053,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
 
        if (hotplug_trigger)
-               ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
+               ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
 
        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev_priv);
@@ -2130,16 +2163,16 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
 }
 
 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
-                               u32 hotplug_trigger,
-                               const u32 hpd[HPD_NUM_PINS])
+                               u32 hotplug_trigger)
 {
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
-                          dig_hotplug_reg, hpd,
+       intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                          hotplug_trigger, dig_hotplug_reg,
+                          dev_priv->hotplug.hpd,
                           bxt_port_hotplug_long_detect);
 
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2151,15 +2184,11 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
        u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
        u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
        long_pulse_detect_func long_pulse_detect;
-       const u32 *hpd;
 
-       if (INTEL_GEN(dev_priv) >= 12) {
+       if (INTEL_GEN(dev_priv) >= 12)
                long_pulse_detect = gen12_port_hotplug_long_detect;
-               hpd = hpd_gen12;
-       } else {
+       else
                long_pulse_detect = gen11_port_hotplug_long_detect;
-               hpd = hpd_gen11;
-       }
 
        if (trigger_tc) {
                u32 dig_hotplug_reg;
@@ -2167,8 +2196,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
                dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
                I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
 
-               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
-                                  dig_hotplug_reg, hpd, long_pulse_detect);
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                                  trigger_tc, dig_hotplug_reg,
+                                  dev_priv->hotplug.hpd,
+                                  long_pulse_detect);
        }
 
        if (trigger_tbt) {
@@ -2177,8 +2208,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
                dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
                I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
 
-               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
-                                  dig_hotplug_reg, hpd, long_pulse_detect);
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                                  trigger_tbt, dig_hotplug_reg,
+                                  dev_priv->hotplug.hpd,
+                                  long_pulse_detect);
        }
 
        if (pin_mask)
@@ -2309,15 +2342,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        if (IS_GEN9_LP(dev_priv)) {
                                tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
                                if (tmp_mask) {
-                                       bxt_hpd_irq_handler(dev_priv, tmp_mask,
-                                                           hpd_bxt);
+                                       bxt_hpd_irq_handler(dev_priv, tmp_mask);
                                        found = true;
                                }
                        } else if (IS_BROADWELL(dev_priv)) {
                                tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
                                if (tmp_mask) {
-                                       ilk_hpd_irq_handler(dev_priv,
-                                                           tmp_mask, hpd_bdw);
+                                       ilk_hpd_irq_handler(dev_priv, tmp_mask);
                                        found = true;
                                }
                        }
@@ -2870,6 +2901,14 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
+
+       /* Wa_14010685332:icl */
+       if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
+               intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+                                SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+               intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+                                SBCLK_RUN_REFCLK_DIS, 0);
+       }
 }
 
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -2989,13 +3028,12 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug_irqs, enabled_irqs;
 
-       if (HAS_PCH_IBX(dev_priv)) {
+       if (HAS_PCH_IBX(dev_priv))
                hotplug_irqs = SDE_HOTPLUG_MASK;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
-       } else {
+       else
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
-       }
+
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3021,13 +3059,12 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
 
 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
                              u32 sde_ddi_mask, u32 sde_tc_mask,
-                             u32 ddi_enable_mask, u32 tc_enable_mask,
-                             const u32 *pins)
+                             u32 ddi_enable_mask, u32 tc_enable_mask)
 {
        u32 hotplug_irqs, enabled_irqs;
 
        hotplug_irqs = sde_ddi_mask | sde_tc_mask;
-       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
@@ -3044,8 +3081,7 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        icp_hpd_irq_setup(dev_priv,
                          SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
-                         ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
-                         hpd_icp);
+                         ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
 }
 
 /*
@@ -3057,8 +3093,7 @@ static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        icp_hpd_irq_setup(dev_priv,
                          SDE_DDI_MASK_TGP, 0,
-                         TGP_DDI_HPD_ENABLE_MASK, 0,
-                         hpd_tgp);
+                         TGP_DDI_HPD_ENABLE_MASK, 0);
 }
 
 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3083,11 +3118,9 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug_irqs, enabled_irqs;
-       const u32 *hpd;
        u32 val;
 
-       hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
-       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
        hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
 
        val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3099,12 +3132,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
                icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
-                                 TGP_DDI_HPD_ENABLE_MASK,
-                                 TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
+                                 TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
-                                 ICP_DDI_HPD_ENABLE_MASK,
-                                 ICP_TC_HPD_ENABLE_MASK, hpd_icp);
+                                 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
 }
 
 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3140,7 +3171,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
                I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
        hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3169,17 +3200,17 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
        if (INTEL_GEN(dev_priv) >= 8) {
                hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 
                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
        } else if (INTEL_GEN(dev_priv) >= 7) {
                hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        } else {
                hotplug_irqs = DE_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        }
@@ -3230,7 +3261,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug_irqs, enabled_irqs;
 
-       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
        hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
 
        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3646,7 +3677,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3751,7 +3782,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                I915_WRITE(GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3893,10 +3924,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                I915_WRITE(GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
 
                if (iir & I915_BSD_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
+                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3924,6 +3955,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        struct drm_device *dev = &dev_priv->drm;
        int i;
 
+       intel_hpd_init_pins(dev_priv);
+
        intel_hpd_init_work(dev_priv);
 
        INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
index 2c80a0194c80dfcc4b655ebaf9890b9279222e70..eb0b5be7c35d33305fbb490aaf0aa2c1e83b0fce 100644 (file)
        GEN(2), \
        .is_mobile = 1, \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_overlay = 1, \
        .display.cursor_needs_physical = 1, \
        .display.overlay_needs_physical = 1, \
        .engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
+       .dma_mask_size = 32, \
        I9XX_PIPE_OFFSETS, \
        I9XX_CURSOR_OFFSETS, \
        I9XX_COLORS, \
 #define I845_FEATURES \
        GEN(2), \
        .pipe_mask = BIT(PIPE_A), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A), \
        .display.has_overlay = 1, \
        .display.overlay_needs_physical = 1, \
        .display.has_gmch = 1, \
        .engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
+       .dma_mask_size = 32, \
        I845_PIPE_OFFSETS, \
        I845_CURSOR_OFFSETS, \
        I9XX_COLORS, \
@@ -218,11 +222,13 @@ static const struct intel_device_info i865g_info = {
 #define GEN3_FEATURES \
        GEN(3), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
        .engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
+       .dma_mask_size = 32, \
        I9XX_PIPE_OFFSETS, \
        I9XX_CURSOR_OFFSETS, \
        I9XX_COLORS, \
@@ -283,6 +289,7 @@ static const struct intel_device_info g33_info = {
        PLATFORM(INTEL_G33),
        .display.has_hotplug = 1,
        .display.has_overlay = 1,
+       .dma_mask_size = 36,
 };
 
 static const struct intel_device_info pnv_g_info = {
@@ -290,6 +297,7 @@ static const struct intel_device_info pnv_g_info = {
        PLATFORM(INTEL_PINEVIEW),
        .display.has_hotplug = 1,
        .display.has_overlay = 1,
+       .dma_mask_size = 36,
 };
 
 static const struct intel_device_info pnv_m_info = {
@@ -298,17 +306,20 @@ static const struct intel_device_info pnv_m_info = {
        .is_mobile = 1,
        .display.has_hotplug = 1,
        .display.has_overlay = 1,
+       .dma_mask_size = 36,
 };
 
 #define GEN4_FEATURES \
        GEN(4), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
        .engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
+       .dma_mask_size = 36, \
        I9XX_PIPE_OFFSETS, \
        I9XX_CURSOR_OFFSETS, \
        I965_COLORS, \
@@ -354,12 +365,14 @@ static const struct intel_device_info gm45_info = {
 #define GEN5_FEATURES \
        GEN(5), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
        .engine_mask = BIT(RCS0) | BIT(VCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
        /* ilk does support rc6, but we do not implement [power] contexts */ \
        .has_rc6 = 0, \
+       .dma_mask_size = 36, \
        I9XX_PIPE_OFFSETS, \
        I9XX_CURSOR_OFFSETS, \
        ILK_COLORS, \
@@ -381,6 +394,7 @@ static const struct intel_device_info ilk_m_info = {
 #define GEN6_FEATURES \
        GEN(6), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
        .display.has_fbc = 1, \
        .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -389,6 +403,7 @@ static const struct intel_device_info ilk_m_info = {
        .has_rc6 = 1, \
        .has_rc6p = 1, \
        .has_rps = true, \
+       .dma_mask_size = 40, \
        .ppgtt_type = INTEL_PPGTT_ALIASING, \
        .ppgtt_size = 31, \
        I9XX_PIPE_OFFSETS, \
@@ -430,6 +445,7 @@ static const struct intel_device_info snb_m_gt2_info = {
 #define GEN7_FEATURES  \
        GEN(7), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
        .display.has_hotplug = 1, \
        .display.has_fbc = 1, \
        .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -438,6 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = {
        .has_rc6 = 1, \
        .has_rc6p = 1, \
        .has_rps = true, \
+       .dma_mask_size = 40, \
        .ppgtt_type = INTEL_PPGTT_ALIASING, \
        .ppgtt_size = 31, \
        IVB_PIPE_OFFSETS, \
@@ -482,6 +499,7 @@ static const struct intel_device_info ivb_q_info = {
        PLATFORM(INTEL_IVYBRIDGE),
        .gt = 2,
        .pipe_mask = 0, /* legal, last one wins */
+       .cpu_transcoder_mask = 0,
        .has_l3_dpf = 1,
 };
 
@@ -490,11 +508,13 @@ static const struct intel_device_info vlv_info = {
        GEN(7),
        .is_lp = 1,
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_rps = true,
        .display.has_gmch = 1,
        .display.has_hotplug = 1,
+       .dma_mask_size = 40,
        .ppgtt_type = INTEL_PPGTT_ALIASING,
        .ppgtt_size = 31,
        .has_snoop = true,
@@ -511,6 +531,8 @@ static const struct intel_device_info vlv_info = {
 #define G75_FEATURES  \
        GEN7_FEATURES, \
        .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
        .display.has_ddi = 1, \
        .has_fpga_dbg = 1, \
        .display.has_psr = 1, \
@@ -543,6 +565,7 @@ static const struct intel_device_info hsw_gt3_info = {
        G75_FEATURES, \
        GEN(8), \
        .has_logical_ring_contexts = 1, \
+       .dma_mask_size = 39, \
        .ppgtt_type = INTEL_PPGTT_FULL, \
        .ppgtt_size = 48, \
        .has_64bit_reloc = 1, \
@@ -581,6 +604,7 @@ static const struct intel_device_info chv_info = {
        PLATFORM(INTEL_CHERRYVIEW),
        GEN(8),
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
        .display.has_hotplug = 1,
        .is_lp = 1,
        .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -590,7 +614,8 @@ static const struct intel_device_info chv_info = {
        .has_rps = true,
        .has_logical_ring_contexts = 1,
        .display.has_gmch = 1,
-       .ppgtt_type = INTEL_PPGTT_ALIASING,
+       .dma_mask_size = 39,
+       .ppgtt_type = INTEL_PPGTT_FULL,
        .ppgtt_size = 32,
        .has_reset_engine = 1,
        .has_snoop = true,
@@ -656,6 +681,9 @@ static const struct intel_device_info skl_gt4_info = {
        .display.has_hotplug = 1, \
        .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+               BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
        .has_64bit_reloc = 1, \
        .display.has_ddi = 1, \
        .has_fpga_dbg = 1, \
@@ -670,6 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
        .has_logical_ring_contexts = 1, \
        .has_logical_ring_preemption = 1, \
        .has_gt_uc = 1, \
+       .dma_mask_size = 39, \
        .ppgtt_type = INTEL_PPGTT_FULL, \
        .ppgtt_size = 48, \
        .has_reset_engine = 1, \
@@ -759,6 +788,9 @@ static const struct intel_device_info cnl_info = {
 #define GEN11_FEATURES \
        GEN10_FEATURES, \
        GEN11_DEFAULT_PAGE_SIZES, \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+               BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
        .pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -799,6 +831,10 @@ static const struct intel_device_info ehl_info = {
 #define GEN12_FEATURES \
        GEN11_FEATURES, \
        GEN(12), \
+       .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+       .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
+               BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
        .pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -822,7 +858,6 @@ static const struct intel_device_info ehl_info = {
 static const struct intel_device_info tgl_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_TIGERLAKE),
-       .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
        .display.has_modular_fia = 1,
        .engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
@@ -920,8 +955,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
 
        i915_driver_remove(i915);
        pci_set_drvdata(pdev, NULL);
-
-       drm_dev_put(&i915->drm);
 }
 
 /* is device_id present in comma separated list of ids */
index 60da28d412d6d0dbf24743f44eb2099611a983ab..75c60c2afb7ea280d4fcbea3dcdaed96d73bb2c1 100644 (file)
 
 #include "i915_drv.h"
 #include "i915_perf.h"
-#include "oa/i915_oa_hsw.h"
-#include "oa/i915_oa_bdw.h"
-#include "oa/i915_oa_chv.h"
-#include "oa/i915_oa_sklgt2.h"
-#include "oa/i915_oa_sklgt3.h"
-#include "oa/i915_oa_sklgt4.h"
-#include "oa/i915_oa_bxt.h"
-#include "oa/i915_oa_kblgt2.h"
-#include "oa/i915_oa_kblgt3.h"
-#include "oa/i915_oa_glk.h"
-#include "oa/i915_oa_cflgt2.h"
-#include "oa/i915_oa_cflgt3.h"
-#include "oa/i915_oa_cnl.h"
-#include "oa/i915_oa_icl.h"
-#include "oa/i915_oa_tgl.h"
 
 /* HW requires this to be a power of two, between 128k and 16M, though driver
  * is currently generally designed assuming the largest 16M size is used such
  *
  * Although this can be observed explicitly while copying reports to userspace
  * by checking for a zeroed report-id field in tail reports, we want to account
- * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
- * read() attempts.
- *
- * In effect we define a tail pointer for reading that lags the real tail
- * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
- * time for the corresponding reports to become visible to the CPU.
- *
- * To manage this we actually track two tail pointers:
- *  1) An 'aging' tail with an associated timestamp that is tracked until we
- *     can trust the corresponding data is visible to the CPU; at which point
- *     it is considered 'aged'.
- *  2) An 'aged' tail that can be used for read()ing.
- *
- * The two separate pointers let us decouple read()s from tail pointer aging.
- *
- * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering EPOLLIN events)
- *
- * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
- * indicates that an updated tail pointer is needed.
+ * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
+ * redundant read() attempts.
+ *
+ * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
+ * in the OA buffer, starting from the tail reported by the HW until we find a
+ * report with its first 2 dwords not 0 meaning its previous report is
+ * completely in memory and ready to be read. Those dwords are also set to 0
+ * once read and the whole buffer is cleared upon OA buffer initialization. The
+ * first dword is the reason for this report while the second is the timestamp,
+ * making the chances of having those 2 fields at 0 fairly unlikely. A more
+ * detailed explanation is available in oa_buffer_check_unlocked().
  *
  * Most of the implementation details for this workaround are in
  * oa_buffer_check_unlocked() and _append_oa_reports()
 #define OA_TAIL_MARGIN_NSEC    100000ULL
 #define INVALID_TAIL_PTR       0xffffffff
 
-/* frequency for checking whether the OA unit has written new reports to the
- * circular OA buffer...
+/* The default frequency for checking whether the OA unit has written new
+ * reports to the circular OA buffer...
  */
-#define POLL_FREQUENCY 200
-#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+#define DEFAULT_POLL_FREQUENCY_HZ 200
+#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
 
 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
 static u32 i915_perf_stream_paranoid = true;
@@ -359,6 +335,12 @@ static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
  * @oa_periodic: Whether to enable periodic OA unit sampling
  * @oa_period_exponent: The OA unit sampling period is derived from this
  * @engine: The engine (typically rcs0) being monitored by the OA unit
+ * @has_sseu: Whether @sseu was specified by userspace
+ * @sseu: internal SSEU configuration computed either from the userspace
+ *        specified configuration in the opening parameters or a default value
+ *        (see get_default_sseu_config())
+ * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
+ * data availability
  *
  * As read_properties_unlocked() enumerates and validates the properties given
  * to open a stream of metrics the configuration is built up in the structure
@@ -378,6 +360,11 @@ struct perf_open_properties {
        int oa_period_exponent;
 
        struct intel_engine_cs *engine;
+
+       bool has_sseu;
+       struct intel_sseu sseu;
+
+       u64 poll_oa_period;
 };
 
 struct i915_oa_config_bo {
@@ -409,10 +396,7 @@ i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
        struct i915_oa_config *oa_config;
 
        rcu_read_lock();
-       if (metrics_set == 1)
-               oa_config = &perf->test_config;
-       else
-               oa_config = idr_find(&perf->metrics_idr, metrics_set);
+       oa_config = idr_find(&perf->metrics_idr, metrics_set);
        if (oa_config)
                oa_config = i915_oa_config_get(oa_config);
        rcu_read_unlock();
@@ -465,8 +449,8 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
  * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
  *
  * Besides returning true when there is data available to read() this function
- * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
- * and .aged_tail_idx state used for reading.
+ * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
+ * object.
  *
  * Note: It's safe to read OA config state here unlocked, assuming that this is
  * only called while the stream is enabled, while the global OA configuration
@@ -476,28 +460,19 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
  */
 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
 {
+       u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
        int report_size = stream->oa_buffer.format_size;
        unsigned long flags;
-       unsigned int aged_idx;
-       u32 head, hw_tail, aged_tail, aging_tail;
+       bool pollin;
+       u32 hw_tail;
        u64 now;
 
        /* We have to consider the (unlikely) possibility that read() errors
-        * could result in an OA buffer reset which might reset the head,
-        * tails[] and aged_tail state.
+        * could result in an OA buffer reset which might reset the head and
+        * tail state.
         */
        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
 
-       /* NB: The head we observe here might effectively be a little out of
-        * date (between head and tails[aged_idx].offset if there is currently
-        * a read() in progress.
-        */
-       head = stream->oa_buffer.head;
-
-       aged_idx = stream->oa_buffer.aged_tail_idx;
-       aged_tail = stream->oa_buffer.tails[aged_idx].offset;
-       aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
-
        hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
 
        /* The tail pointer increases in 64 byte increments,
@@ -507,64 +482,63 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
 
        now = ktime_get_mono_fast_ns();
 
-       /* Update the aged tail
-        *
-        * Flip the tail pointer available for read()s once the aging tail is
-        * old enough to trust that the corresponding data will be visible to
-        * the CPU...
-        *
-        * Do this before updating the aging pointer in case we may be able to
-        * immediately start aging a new pointer too (if new data has become
-        * available) without needing to wait for a later hrtimer callback.
-        */
-       if (aging_tail != INVALID_TAIL_PTR &&
-           ((now - stream->oa_buffer.aging_timestamp) >
-            OA_TAIL_MARGIN_NSEC)) {
-
-               aged_idx ^= 1;
-               stream->oa_buffer.aged_tail_idx = aged_idx;
+       if (hw_tail == stream->oa_buffer.aging_tail &&
+           (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
+               /* If the HW tail hasn't moved since the last check and the HW
+                * tail has been aging for long enough, declare it the new
+                * tail.
+                */
+               stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
+       } else {
+               u32 head, tail, aged_tail;
 
-               aged_tail = aging_tail;
+               /* NB: The head we observe here might effectively be a little
+                * out of date. If a read() is in progress, the head could be
+                * anywhere between this head and stream->oa_buffer.tail.
+                */
+               head = stream->oa_buffer.head - gtt_offset;
+               aged_tail = stream->oa_buffer.tail - gtt_offset;
+
+               hw_tail -= gtt_offset;
+               tail = hw_tail;
+
+               /* Walk the stream backward until we find a report with dword 0
+                * & 1 not at 0. Since the circular buffer pointers progress by
+                * increments of 64 bytes and that reports can be up to 256
+                * bytes long, we can't tell whether a report has fully landed
+                * in memory before the first 2 dwords of the following report
+                * have effectively landed.
+                *
+                * This is assuming that the writes of the OA unit land in
+                * memory in the order they were written to.
+                * If not : (╯°□°)╯︵ ┻━┻
+                */
+               while (OA_TAKEN(tail, aged_tail) >= report_size) {
+                       u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
 
-               /* Mark that we need a new pointer to start aging... */
-               stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
-               aging_tail = INVALID_TAIL_PTR;
-       }
+                       if (report32[0] != 0 || report32[1] != 0)
+                               break;
 
-       /* Update the aging tail
-        *
-        * We throttle aging tail updates until we have a new tail that
-        * represents >= one report more data than is already available for
-        * reading. This ensures there will be enough data for a successful
-        * read once this new pointer has aged and ensures we will give the new
-        * pointer time to age.
-        */
-       if (aging_tail == INVALID_TAIL_PTR &&
-           (aged_tail == INVALID_TAIL_PTR ||
-            OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
-               struct i915_vma *vma = stream->oa_buffer.vma;
-               u32 gtt_offset = i915_ggtt_offset(vma);
-
-               /* Be paranoid and do a bounds check on the pointer read back
-                * from hardware, just in case some spurious hardware condition
-                * could put the tail out of bounds...
-                */
-               if (hw_tail >= gtt_offset &&
-                   hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
-                       stream->oa_buffer.tails[!aged_idx].offset =
-                               aging_tail = hw_tail;
-                       stream->oa_buffer.aging_timestamp = now;
-               } else {
-                       drm_err(&stream->perf->i915->drm,
-                               "Ignoring spurious out of range OA buffer tail pointer = %x\n",
-                               hw_tail);
+                       tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
                }
+
+               if (OA_TAKEN(hw_tail, tail) > report_size &&
+                   __ratelimit(&stream->perf->tail_pointer_race))
+                       DRM_NOTE("unlanded report(s) head=0x%x "
+                                "tail=0x%x hw_tail=0x%x\n",
+                                head, tail, hw_tail);
+
+               stream->oa_buffer.tail = gtt_offset + tail;
+               stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
+               stream->oa_buffer.aging_timestamp = now;
        }
 
+       pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
+                         stream->oa_buffer.head - gtt_offset) >= report_size;
+
        spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
 
-       return aged_tail == INVALID_TAIL_PTR ?
-               false : OA_TAKEN(aged_tail, head) >= report_size;
+       return pollin;
 }
 
 /**
@@ -682,7 +656,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
-       unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;
@@ -693,18 +666,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
 
        head = stream->oa_buffer.head;
-       aged_tail_idx = stream->oa_buffer.aged_tail_idx;
-       tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+       tail = stream->oa_buffer.tail;
 
        spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
 
-       /*
-        * An invalid tail pointer here means we're still waiting for the poll
-        * hrtimer callback to give us a pointer
-        */
-       if (tail == INVALID_TAIL_PTR)
-               return -EAGAIN;
-
        /*
         * NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
@@ -838,13 +803,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                }
 
                /*
-                * The above reason field sanity check is based on
-                * the assumption that the OA buffer is initially
-                * zeroed and we reset the field after copying so the
-                * check is still meaningful once old reports start
-                * being overwritten.
+                * Clear out the first 2 dwords as a means to detect unlanded
+                * reports.
                 */
                report32[0] = 0;
+               report32[1] = 0;
        }
 
        if (start_offset != *offset) {
@@ -985,7 +948,6 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
-       unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;
@@ -996,17 +958,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
 
        head = stream->oa_buffer.head;
-       aged_tail_idx = stream->oa_buffer.aged_tail_idx;
-       tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+       tail = stream->oa_buffer.tail;
 
        spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
 
-       /* An invalid tail pointer here means we're still waiting for the poll
-        * hrtimer callback to give us a pointer
-        */
-       if (tail == INVALID_TAIL_PTR)
-               return -EAGAIN;
-
        /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
         */
@@ -1064,13 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
                if (ret)
                        break;
 
-               /* The above report-id field sanity check is based on
-                * the assumption that the OA buffer is initially
-                * zeroed and we reset the field after copying so the
-                * check is still meaningful once old reports start
-                * being overwritten.
+               /* Clear out the first 2 dwords as a means to detect unlanded
+                * reports.
                 */
                report32[0] = 0;
+               report32[1] = 0;
        }
 
        if (start_offset != *offset) {
@@ -1447,8 +1400,8 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
                           gtt_offset | OABUFFER_SIZE_16M);
 
        /* Mark that we need updated tail pointers to read from... */
-       stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
-       stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+       stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+       stream->oa_buffer.tail = gtt_offset;
 
        spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
 
@@ -1470,8 +1423,6 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
         * memory...
         */
        memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
-       stream->pollin = false;
 }
 
 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1501,8 +1452,8 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
        intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
 
        /* Mark that we need updated tail pointers to read from... */
-       stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
-       stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+       stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+       stream->oa_buffer.tail = gtt_offset;
 
        /*
         * Reset state used to recognise context switches, affecting which
@@ -1526,8 +1477,6 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
         * memory...
         */
        memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
-       stream->pollin = false;
 }
 
 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1557,8 +1506,8 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
                           gtt_offset & GEN12_OAG_OATAILPTR_MASK);
 
        /* Mark that we need updated tail pointers to read from... */
-       stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
-       stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+       stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+       stream->oa_buffer.tail = gtt_offset;
 
        /*
         * Reset state used to recognise context switches, affecting which
@@ -1583,8 +1532,6 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
         */
        memset(stream->oa_buffer.vaddr, 0,
               stream->oa_buffer.vma->size);
-
-       stream->pollin = false;
 }
 
 static int alloc_oa_buffer(struct i915_perf_stream *stream)
@@ -1665,10 +1612,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
        struct drm_i915_gem_object *bo;
        struct i915_vma *vma;
        const u64 delay_ticks = 0xffffffffffffffff -
-               DIV64_U64_ROUND_UP(
-                       atomic64_read(&stream->perf->noa_programming_delay) *
-                       RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
-                       1000000ull);
+               i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
        const u32 base = stream->engine->mmio_base;
 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
        u32 *batch, *ts0, *cs, *jump;
@@ -1970,10 +1914,11 @@ out:
        return i915_vma_get(oa_bo->vma);
 }
 
-static struct i915_request *
+static int
 emit_oa_config(struct i915_perf_stream *stream,
               struct i915_oa_config *oa_config,
-              struct intel_context *ce)
+              struct intel_context *ce,
+              struct i915_active *active)
 {
        struct i915_request *rq;
        struct i915_vma *vma;
@@ -1981,7 +1926,7 @@ emit_oa_config(struct i915_perf_stream *stream,
 
        vma = get_oa_vma(stream, oa_config);
        if (IS_ERR(vma))
-               return ERR_CAST(vma);
+               return PTR_ERR(vma);
 
        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (err)
@@ -1995,6 +1940,18 @@ emit_oa_config(struct i915_perf_stream *stream,
                goto err_vma_unpin;
        }
 
+       if (!IS_ERR_OR_NULL(active)) {
+               /* After all individual context modifications */
+               err = i915_request_await_active(rq, active,
+                                               I915_ACTIVE_AWAIT_ACTIVE);
+               if (err)
+                       goto err_add_request;
+
+               err = i915_active_add_request(active, rq);
+               if (err)
+                       goto err_add_request;
+       }
+
        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, 0);
        if (!err)
@@ -2009,14 +1966,13 @@ emit_oa_config(struct i915_perf_stream *stream,
        if (err)
                goto err_add_request;
 
-       i915_request_get(rq);
 err_add_request:
        i915_request_add(rq);
 err_vma_unpin:
        i915_vma_unpin(vma);
 err_vma_put:
        i915_vma_put(vma);
-       return err ? ERR_PTR(err) : rq;
+       return err;
 }
 
 static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2024,8 +1980,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
        return stream->pinned_ctx ?: stream->engine->kernel_context;
 }
 
-static struct i915_request *
-hsw_enable_metric_set(struct i915_perf_stream *stream)
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+                     struct i915_active *active)
 {
        struct intel_uncore *uncore = stream->uncore;
 
@@ -2044,7 +2001,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream)
        intel_uncore_rmw(uncore, GEN6_UCGCTL1,
                         0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
-       return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+       return emit_oa_config(stream,
+                             stream->oa_config, oa_context(stream),
+                             active);
 }
 
 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2114,9 +2073,6 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
        for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
                reg_state[ctx_flexeu0 + i * 2 + 1] =
                        oa_config_flex_reg(stream->oa_config, flex_regs[i]);
-
-       reg_state[CTX_R_PWR_CLK_STATE] =
-               intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
 }
 
 struct flex {
@@ -2137,7 +2093,7 @@ gen8_store_flex(struct i915_request *rq,
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+       offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
        do {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = offset + flex->offset * sizeof(u32);
@@ -2194,8 +2150,10 @@ static int gen8_modify_context(struct intel_context *ce,
        return err;
 }
 
-static int gen8_modify_self(struct intel_context *ce,
-                           const struct flex *flex, unsigned int count)
+static int
+gen8_modify_self(struct intel_context *ce,
+                const struct flex *flex, unsigned int count,
+                struct i915_active *active)
 {
        struct i915_request *rq;
        int err;
@@ -2206,8 +2164,17 @@ static int gen8_modify_self(struct intel_context *ce,
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
+       if (!IS_ERR_OR_NULL(active)) {
+               err = i915_active_add_request(active, rq);
+               if (err)
+                       goto err_add_request;
+       }
+
        err = gen8_load_flex(rq, ce, flex, count);
+       if (err)
+               goto err_add_request;
 
+err_add_request:
        i915_request_add(rq);
        return err;
 }
@@ -2241,7 +2208,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
        return err;
 }
 
-static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+                                      struct i915_active *active)
 {
        int err;
        struct intel_context *ce = stream->pinned_ctx;
@@ -2250,7 +2218,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
                {
                        GEN8_OACTXCONTROL,
                        stream->perf->ctx_oactxctrl_offset + 1,
-                       enable ? GEN8_OA_COUNTER_RESUME : 0,
+                       active ? GEN8_OA_COUNTER_RESUME : 0,
                },
        };
        /* Offsets in regs_lri are not used since this configuration is only
@@ -2262,13 +2230,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
                        GEN12_OAR_OACONTROL,
                        GEN12_OAR_OACONTROL_OFFSET + 1,
                        (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
-                       (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+                       (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
                },
                {
                        RING_CONTEXT_CONTROL(ce->engine->mmio_base),
                        CTX_CONTEXT_CONTROL,
                        _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
-                                     enable ?
+                                     active ?
                                      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
                                      0)
                },
@@ -2285,7 +2253,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
                return err;
 
        /* Apply regs_lri using LRI with pinned context */
-       return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
+       return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
 }
 
 /*
@@ -2313,9 +2281,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
  * Note: it's only the RCS/Render context that has any OA state.
  * Note: the first flex register passed must always be R_PWR_CLK_STATE
  */
-static int oa_configure_all_contexts(struct i915_perf_stream *stream,
-                                    struct flex *regs,
-                                    size_t num_regs)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+                         struct flex *regs,
+                         size_t num_regs,
+                         struct i915_active *active)
 {
        struct drm_i915_private *i915 = stream->perf->i915;
        struct intel_engine_cs *engine;
@@ -2372,7 +2342,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
 
                regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
 
-               err = gen8_modify_self(ce, regs, num_regs);
+               err = gen8_modify_self(ce, regs, num_regs, active);
                if (err)
                        return err;
        }
@@ -2380,8 +2350,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
        return 0;
 }
 
-static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
-                                       const struct i915_oa_config *oa_config)
+static int
+gen12_configure_all_contexts(struct i915_perf_stream *stream,
+                            const struct i915_oa_config *oa_config,
+                            struct i915_active *active)
 {
        struct flex regs[] = {
                {
@@ -2390,11 +2362,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
                },
        };
 
-       return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+       return oa_configure_all_contexts(stream,
+                                        regs, ARRAY_SIZE(regs),
+                                        active);
 }
 
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
-                                     const struct i915_oa_config *oa_config)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+                          const struct i915_oa_config *oa_config,
+                          struct i915_active *active)
 {
        /* The MMIO offsets for Flex EU registers aren't contiguous */
        const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
@@ -2427,11 +2403,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
        for (i = 2; i < ARRAY_SIZE(regs); i++)
                regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
 
-       return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+       return oa_configure_all_contexts(stream,
+                                        regs, ARRAY_SIZE(regs),
+                                        active);
 }
 
-static struct i915_request *
-gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+                      struct i915_active *active)
 {
        struct intel_uncore *uncore = stream->uncore;
        struct i915_oa_config *oa_config = stream->oa_config;
@@ -2471,11 +2450,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream)
         * to make sure all slices/subslices are ON before writing to NOA
         * registers.
         */
-       ret = lrc_configure_all_contexts(stream, oa_config);
+       ret = lrc_configure_all_contexts(stream, oa_config, active);
        if (ret)
-               return ERR_PTR(ret);
+               return ret;
 
-       return emit_oa_config(stream, oa_config, oa_context(stream));
+       return emit_oa_config(stream,
+                             stream->oa_config, oa_context(stream),
+                             active);
 }
 
 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2485,8 +2466,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
                             0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
 }
 
-static struct i915_request *
-gen12_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+                       struct i915_active *active)
 {
        struct intel_uncore *uncore = stream->uncore;
        struct i915_oa_config *oa_config = stream->oa_config;
@@ -2515,9 +2497,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
         * to make sure all slices/subslices are ON before writing to NOA
         * registers.
         */
-       ret = gen12_configure_all_contexts(stream, oa_config);
+       ret = gen12_configure_all_contexts(stream, oa_config, active);
        if (ret)
-               return ERR_PTR(ret);
+               return ret;
 
        /*
         * For Gen12, performance counters are context
@@ -2525,12 +2507,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
         * requested this.
         */
        if (stream->ctx) {
-               ret = gen12_configure_oar_context(stream, true);
+               ret = gen12_configure_oar_context(stream, active);
                if (ret)
-                       return ERR_PTR(ret);
+                       return ret;
        }
 
-       return emit_oa_config(stream, oa_config, oa_context(stream));
+       return emit_oa_config(stream,
+                             stream->oa_config, oa_context(stream),
+                             active);
 }
 
 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2538,7 +2522,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
        struct intel_uncore *uncore = stream->uncore;
 
        /* Reset all contexts' slices/subslices configurations. */
-       lrc_configure_all_contexts(stream, NULL);
+       lrc_configure_all_contexts(stream, NULL, NULL);
 
        intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
 }
@@ -2548,7 +2532,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream)
        struct intel_uncore *uncore = stream->uncore;
 
        /* Reset all contexts' slices/subslices configurations. */
-       lrc_configure_all_contexts(stream, NULL);
+       lrc_configure_all_contexts(stream, NULL, NULL);
 
        /* Make sure we disable noa to save power. */
        intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2559,11 +2543,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
        struct intel_uncore *uncore = stream->uncore;
 
        /* Reset all contexts' slices/subslices configurations. */
-       gen12_configure_all_contexts(stream, NULL);
+       gen12_configure_all_contexts(stream, NULL, NULL);
 
        /* disable the context save/restore or OAR counters */
        if (stream->ctx)
-               gen12_configure_oar_context(stream, false);
+               gen12_configure_oar_context(stream, NULL);
 
        /* Make sure we disable noa to save power. */
        intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2655,11 +2639,13 @@ static void gen12_oa_enable(struct i915_perf_stream *stream)
  */
 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
+       stream->pollin = false;
+
        stream->perf->ops.oa_enable(stream);
 
        if (stream->periodic)
                hrtimer_start(&stream->poll_check_timer,
-                             ns_to_ktime(POLL_PERIOD),
+                             ns_to_ktime(stream->poll_oa_period),
                              HRTIMER_MODE_REL_PINNED);
 }
 
@@ -2735,16 +2721,52 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
 
 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
 {
-       struct i915_request *rq;
+       struct i915_active *active;
+       int err;
 
-       rq = stream->perf->ops.enable_metric_set(stream);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
+       active = i915_active_create();
+       if (!active)
+               return -ENOMEM;
 
-       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
-       i915_request_put(rq);
+       err = stream->perf->ops.enable_metric_set(stream, active);
+       if (err == 0)
+               __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
 
-       return 0;
+       i915_active_put(active);
+       return err;
+}
+
+static void
+get_default_sseu_config(struct intel_sseu *out_sseu,
+                       struct intel_engine_cs *engine)
+{
+       const struct sseu_dev_info *devinfo_sseu =
+               &RUNTIME_INFO(engine->i915)->sseu;
+
+       *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
+
+       if (IS_GEN(engine->i915, 11)) {
+               /*
+                * We only need subslice count so it doesn't matter which ones
+                * we select - just turn off low bits in the amount of half of
+                * all available subslices per slice.
+                */
+               out_sseu->subslice_mask =
+                       ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
+               out_sseu->slice_mask = 0x1;
+       }
+}
+
+static int
+get_sseu_config(struct intel_sseu *out_sseu,
+               struct intel_engine_cs *engine,
+               const struct drm_i915_gem_context_param_sseu *drm_sseu)
+{
+       if (drm_sseu->engine.engine_class != engine->uabi_class ||
+           drm_sseu->engine.engine_instance != engine->uabi_instance)
+               return -EINVAL;
+
+       return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
 }
 
 /**
@@ -2879,6 +2901,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
                goto err_oa_buf_alloc;
 
        stream->ops = &i915_oa_stream_ops;
+
+       perf->sseu = props->sseu;
        WRITE_ONCE(perf->exclusive_stream, stream);
 
        ret = i915_perf_stream_enable_sync(stream);
@@ -2930,10 +2954,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
 
        /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
        stream = READ_ONCE(engine->i915->perf.exclusive_stream);
-       /*
-        * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
-        * is already doing that, so nothing to be done for gen12 here.
-        */
        if (stream && INTEL_GEN(stream->perf->i915) < 12)
                gen8_update_reg_state_unlocked(ce, stream);
 }
@@ -3024,7 +3044,8 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
                wake_up(&stream->poll_wq);
        }
 
-       hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+       hrtimer_forward_now(hrtimer,
+                           ns_to_ktime(stream->poll_oa_period));
 
        return HRTIMER_RESTART;
 }
@@ -3155,7 +3176,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
                return -EINVAL;
 
        if (config != stream->oa_config) {
-               struct i915_request *rq;
+               int err;
 
                /*
                 * If OA is bound to a specific context, emit the
@@ -3166,13 +3187,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
                 * When set globally, we use a low priority kernel context,
                 * so it will effectively take effect when idle.
                 */
-               rq = emit_oa_config(stream, config, oa_context(stream));
-               if (!IS_ERR(rq)) {
+               err = emit_oa_config(stream, config, oa_context(stream), NULL);
+               if (!err)
                        config = xchg(&stream->oa_config, config);
-                       i915_request_put(rq);
-               } else {
-                       ret = PTR_ERR(rq);
-               }
+               else
+                       ret = err;
        }
 
        i915_oa_config_put(config);
@@ -3385,6 +3404,14 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
                privileged_op = true;
        }
 
+       /*
+        * Asking for SSEU configuration is a privileged operation.
+        */
+       if (props->has_sseu)
+               privileged_op = true;
+       else
+               get_default_sseu_config(&props->sseu, props->engine);
+
        /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
         * we check a dev.i915.perf_stream_paranoid sysctl option
         * to determine if it's ok to access system wide OA counters
@@ -3405,6 +3432,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
 
        stream->perf = perf;
        stream->ctx = specific_ctx;
+       stream->poll_oa_period = props->poll_oa_period;
 
        ret = i915_oa_stream_init(stream, param, props);
        if (ret)
@@ -3454,8 +3482,7 @@ err:
 
 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
 {
-       return div64_u64(1000000000ULL * (2ULL << exponent),
-                        1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
+       return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
 }
 
 /**
@@ -3480,8 +3507,10 @@ static int read_properties_unlocked(struct i915_perf *perf,
 {
        u64 __user *uprop = uprops;
        u32 i;
+       int ret;
 
        memset(props, 0, sizeof(struct perf_open_properties));
+       props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
 
        if (!n_props) {
                DRM_DEBUG("No i915 perf properties given\n");
@@ -3511,7 +3540,6 @@ static int read_properties_unlocked(struct i915_perf *perf,
        for (i = 0; i < n_props; i++) {
                u64 oa_period, oa_freq_hz;
                u64 id, value;
-               int ret;
 
                ret = get_user(id, uprop);
                if (ret)
@@ -3596,6 +3624,32 @@ static int read_properties_unlocked(struct i915_perf *perf,
                case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
                        props->hold_preemption = !!value;
                        break;
+               case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
+                       struct drm_i915_gem_context_param_sseu user_sseu;
+
+                       if (copy_from_user(&user_sseu,
+                                          u64_to_user_ptr(value),
+                                          sizeof(user_sseu))) {
+                               DRM_DEBUG("Unable to copy global sseu parameter\n");
+                               return -EFAULT;
+                       }
+
+                       ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
+                       if (ret) {
+                               DRM_DEBUG("Invalid SSEU configuration\n");
+                               return ret;
+                       }
+                       props->has_sseu = true;
+                       break;
+               }
+               case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
+                       if (value < 100000 /* 100us */) {
+                               DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
+                                         value);
+                               return -EINVAL;
+                       }
+                       props->poll_oa_period = value;
+                       break;
                case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
                        return -EINVAL;
@@ -3678,7 +3732,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
 void i915_perf_register(struct drm_i915_private *i915)
 {
        struct i915_perf *perf = &i915->perf;
-       int ret;
 
        if (!perf->i915)
                return;
@@ -3692,64 +3745,7 @@ void i915_perf_register(struct drm_i915_private *i915)
        perf->metrics_kobj =
                kobject_create_and_add("metrics",
                                       &i915->drm.primary->kdev->kobj);
-       if (!perf->metrics_kobj)
-               goto exit;
-
-       sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
-
-       if (IS_TIGERLAKE(i915)) {
-               i915_perf_load_test_config_tgl(i915);
-       } else if (INTEL_GEN(i915) >= 11) {
-               i915_perf_load_test_config_icl(i915);
-       } else if (IS_CANNONLAKE(i915)) {
-               i915_perf_load_test_config_cnl(i915);
-       } else if (IS_COFFEELAKE(i915)) {
-               if (IS_CFL_GT2(i915))
-                       i915_perf_load_test_config_cflgt2(i915);
-               if (IS_CFL_GT3(i915))
-                       i915_perf_load_test_config_cflgt3(i915);
-       } else if (IS_GEMINILAKE(i915)) {
-               i915_perf_load_test_config_glk(i915);
-       } else if (IS_KABYLAKE(i915)) {
-               if (IS_KBL_GT2(i915))
-                       i915_perf_load_test_config_kblgt2(i915);
-               else if (IS_KBL_GT3(i915))
-                       i915_perf_load_test_config_kblgt3(i915);
-       } else if (IS_BROXTON(i915)) {
-               i915_perf_load_test_config_bxt(i915);
-       } else if (IS_SKYLAKE(i915)) {
-               if (IS_SKL_GT2(i915))
-                       i915_perf_load_test_config_sklgt2(i915);
-               else if (IS_SKL_GT3(i915))
-                       i915_perf_load_test_config_sklgt3(i915);
-               else if (IS_SKL_GT4(i915))
-                       i915_perf_load_test_config_sklgt4(i915);
-       } else if (IS_CHERRYVIEW(i915)) {
-               i915_perf_load_test_config_chv(i915);
-       } else if (IS_BROADWELL(i915)) {
-               i915_perf_load_test_config_bdw(i915);
-       } else if (IS_HASWELL(i915)) {
-               i915_perf_load_test_config_hsw(i915);
-       }
-
-       if (perf->test_config.id == 0)
-               goto sysfs_error;
-
-       ret = sysfs_create_group(perf->metrics_kobj,
-                                &perf->test_config.sysfs_metric);
-       if (ret)
-               goto sysfs_error;
-
-       perf->test_config.perf = perf;
-       kref_init(&perf->test_config.ref);
-
-       goto exit;
 
-sysfs_error:
-       kobject_put(perf->metrics_kobj);
-       perf->metrics_kobj = NULL;
-
-exit:
        mutex_unlock(&perf->lock);
 }
 
@@ -3769,9 +3765,6 @@ void i915_perf_unregister(struct drm_i915_private *i915)
        if (!perf->metrics_kobj)
                return;
 
-       sysfs_remove_group(perf->metrics_kobj,
-                          &perf->test_config.sysfs_metric);
-
        kobject_put(perf->metrics_kobj);
        perf->metrics_kobj = NULL;
 }
@@ -4346,8 +4339,8 @@ void i915_perf_init(struct drm_i915_private *i915)
        if (perf->ops.enable_metric_set) {
                mutex_init(&perf->lock);
 
-               oa_sample_rate_hard_limit = 1000 *
-                       (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+               oa_sample_rate_hard_limit =
+                       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
 
                mutex_init(&perf->metrics_lock);
                idr_init(&perf->metrics_idr);
@@ -4370,6 +4363,11 @@ void i915_perf_init(struct drm_i915_private *i915)
                ratelimit_set_flags(&perf->spurious_report_rs,
                                    RATELIMIT_MSG_ON_RELEASE);
 
+               ratelimit_state_init(&perf->tail_pointer_race,
+                                    5 * HZ, 10);
+               ratelimit_set_flags(&perf->tail_pointer_race,
+                                   RATELIMIT_MSG_ON_RELEASE);
+
                atomic64_set(&perf->noa_programming_delay,
                             500 * 1000 /* 500us */);
 
@@ -4430,8 +4428,15 @@ int i915_perf_ioctl_version(void)
         *    preemption on a particular context so that performance data is
         *    accessible from a delta of MI_RPC reports without looking at the
         *    OA buffer.
+        *
+        * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
+        *    be run for the duration of the performance recording based on
+        *    their SSEU configuration.
+        *
+        * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
+        *    interval for the hrtimer used to check for OA data.
         */
-       return 3;
+       return 5;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index a0e22f00f6cfbe84ad493596c56fdf98e19ffd99..a36a455ae3369c8cdefcaf824b14ba689e99757b 100644 (file)
 #include <linux/uuid.h>
 #include <linux/wait.h>
 
+#include "gt/intel_sseu.h"
 #include "i915_reg.h"
 #include "intel_wakeref.h"
 
 struct drm_i915_private;
 struct file;
+struct i915_active;
 struct i915_gem_context;
 struct i915_perf;
 struct i915_vma;
@@ -272,21 +274,10 @@ struct i915_perf_stream {
                spinlock_t ptr_lock;
 
                /**
-                * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
-                * used for reading.
-                *
-                * Initial values of 0xffffffff are invalid and imply that an
-                * update is required (and should be ignored by an attempted
-                * read)
-                */
-               struct {
-                       u32 offset;
-               } tails[2];
-
-               /**
-                * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+                * @aging_tail: The last HW tail reported by HW. The data
+                * might not have made it to memory yet though.
                 */
-               unsigned int aged_tail_idx;
+               u32 aging_tail;
 
                /**
                 * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
@@ -302,6 +293,11 @@ struct i915_perf_stream {
                 * OA buffer data to userspace.
                 */
                u32 head;
+
+               /**
+                * @tail: The last verified tail that can be read by userspace.
+                */
+               u32 tail;
        } oa_buffer;
 
        /**
@@ -309,6 +305,12 @@ struct i915_perf_stream {
         * reprogrammed.
         */
        struct i915_vma *noa_wait;
+
+       /**
+        * @poll_oa_period: The period in nanoseconds at which the OA
+        * buffer should be checked for available data.
+        */
+       u64 poll_oa_period;
 };
 
 /**
@@ -339,8 +341,8 @@ struct i915_oa_ops {
         * counter reports being sampled. May apply system constraints such as
         * disabling EU clock gating as required.
         */
-       struct i915_request *
-               (*enable_metric_set)(struct i915_perf_stream *stream);
+       int (*enable_metric_set)(struct i915_perf_stream *stream,
+                                struct i915_active *active);
 
        /**
         * @disable_metric_set: Remove system constraints associated with using
@@ -407,13 +409,23 @@ struct i915_perf {
         */
        struct i915_perf_stream *exclusive_stream;
 
+       /**
+        * @sseu: sseu configuration selected to run while perf is active,
+        * applies to all contexts.
+        */
+       struct intel_sseu sseu;
+
        /**
         * For rate limiting any notifications of spurious
         * invalid OA reports
         */
        struct ratelimit_state spurious_report_rs;
 
-       struct i915_oa_config test_config;
+       /**
+        * For rate limiting any notifications of tail pointer
+        * race.
+        */
+       struct ratelimit_state tail_pointer_race;
 
        u32 gen7_latched_oastatus1;
        u32 ctx_oactxctrl_offset;
index 2c062534eac134694e8a84e9e1d861174e49ee28..e991a707bdb758c29fb1a7f782c92abaa7525305 100644 (file)
@@ -439,29 +439,10 @@ static u64 count_interrupts(struct drm_i915_private *i915)
        return sum;
 }
 
-static void engine_event_destroy(struct perf_event *event)
-{
-       struct drm_i915_private *i915 =
-               container_of(event->pmu, typeof(*i915), pmu.base);
-       struct intel_engine_cs *engine;
-
-       engine = intel_engine_lookup_user(i915,
-                                         engine_event_class(event),
-                                         engine_event_instance(event));
-       if (drm_WARN_ON_ONCE(&i915->drm, !engine))
-               return;
-
-       if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
-           intel_engine_supports_stats(engine))
-               intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_event_destroy(struct perf_event *event)
 {
        WARN_ON(event->parent);
-
-       if (is_engine_event(event))
-               engine_event_destroy(event);
+       module_put(THIS_MODULE);
 }
 
 static int
@@ -514,23 +495,13 @@ static int engine_event_init(struct perf_event *event)
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;
-       u8 sample;
-       int ret;
 
        engine = intel_engine_lookup_user(i915, engine_event_class(event),
                                          engine_event_instance(event));
        if (!engine)
                return -ENODEV;
 
-       sample = engine_event_sample(event);
-       ret = engine_event_status(engine, sample);
-       if (ret)
-               return ret;
-
-       if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
-               ret = intel_enable_engine_stats(engine);
-
-       return ret;
+       return engine_event_status(engine, engine_event_sample(event));
 }
 
 static int i915_pmu_event_init(struct perf_event *event)
@@ -563,8 +534,10 @@ static int i915_pmu_event_init(struct perf_event *event)
        if (ret)
                return ret;
 
-       if (!event->parent)
+       if (!event->parent) {
+               __module_get(THIS_MODULE);
                event->destroy = i915_pmu_event_destroy;
+       }
 
        return 0;
 }
@@ -1115,7 +1088,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
        int ret = -ENOMEM;
 
        if (INTEL_GEN(i915) <= 2) {
-               dev_info(i915->drm.dev, "PMU not supported for this GPU.");
+               drm_info(&i915->drm, "PMU not supported for this GPU.");
                return;
        }
 
@@ -1178,7 +1151,7 @@ err_name:
        if (!is_igp(i915))
                kfree(pmu->name);
 err:
-       dev_notice(i915->drm.dev, "Failed to register PMU!\n");
+       drm_notice(&i915->drm, "Failed to register PMU!\n");
 }
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
index 732aad1488813b867aa92d6cde8b64ee04b971b5..5003a71113cbe9b6cd81b5bb6e24fd90b5b71ec5 100644 (file)
@@ -24,15 +24,12 @@ enum {
        I915_PRIORITY_DISPLAY,
 };
 
-#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY_SHIFT 0
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
 #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
 #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
 
-#define I915_PRIORITY_WAIT             ((u8)BIT(0))
-#define I915_PRIORITY_NOSEMAPHORE      ((u8)BIT(1))
-
 /* Smallest priority value that cannot be bumped. */
 #define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
 
@@ -47,8 +44,6 @@ enum {
 #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
 #define I915_PRIORITY_BARRIER INT_MAX
 
-#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
-
 struct i915_priolist {
        struct list_head requests[I915_PRIORITY_COUNT];
        struct rb_node node;
index 6e12000c4b6b5cd7cb181c8bd2954a8da7d2b574..6c076a24eb822fa4fcfbc4a575d852cf7b6b0cc2 100644 (file)
@@ -561,6 +561,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Registers used only by the command parser
  */
 #define BCS_SWCTRL _MMIO(0x22200)
+#define   BCS_SRC_Y REG_BIT(0)
+#define   BCS_DST_Y REG_BIT(1)
 
 /* There are 16 GPR registers */
 #define BCS_GPR(n)     _MMIO(0x22600 + (n) * 8)
@@ -2555,6 +2557,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
 #define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
 #define BSD_HWS_PGA_GEN7       _MMIO(0x04180)
+#define GEN12_GFX_CCS_AUX_NV   _MMIO(0x4208)
+#define GEN12_VD0_AUX_NV       _MMIO(0x4218)
+#define GEN12_VD1_AUX_NV       _MMIO(0x4228)
+#define GEN12_VD2_AUX_NV       _MMIO(0x4298)
+#define GEN12_VD3_AUX_NV       _MMIO(0x42A8)
+#define GEN12_VE0_AUX_NV       _MMIO(0x4238)
+#define GEN12_VE1_AUX_NV       _MMIO(0x42B8)
+#define   AUX_INV              REG_BIT(0)
 #define BLT_HWS_PGA_GEN7       _MMIO(0x04280)
 #define VEBOX_HWS_PGA_GEN7     _MMIO(0x04380)
 #define RING_ACTHD(base)       _MMIO((base) + 0x74)
@@ -2657,6 +2667,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define RING_DMA_FADD_UDW(base)        _MMIO((base) + 0x60) /* gen8+ */
 #define RING_INSTPM(base)      _MMIO((base) + 0xc0)
 #define RING_MI_MODE(base)     _MMIO((base) + 0x9c)
+#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84)
 #define INSTPS         _MMIO(0x2070) /* 965+ only */
 #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
 #define ACTHD_I965     _MMIO(0x2074)
@@ -4013,31 +4024,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN6_RP_STATE_LIMITS   _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
-
-/*
- * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
- * 8300) freezing up around GPU hangs. Looks as if even
- * scheduling/timer interrupts start misbehaving if the RPS
- * EI/thresholds are "bad", leading to a very sluggish or even
- * frozen machine.
- */
-#define INTERVAL_1_28_US(us)   roundup(((us) * 100) >> 7, 25)
-#define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
-#define INTERVAL_0_833_US(us)  (((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
-                               (IS_GEN9_LP(dev_priv) ? \
-                               INTERVAL_0_833_US(us) : \
-                               INTERVAL_1_33_US(us)) : \
-                               INTERVAL_1_28_US(us))
-
-#define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
-#define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
-#define INTERVAL_0_833_TO_US(interval) (((interval) * 5)  / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
-                           (IS_GEN9_LP(dev_priv) ? \
-                           INTERVAL_0_833_TO_US(interval) : \
-                           INTERVAL_1_33_TO_US(interval)) : \
-                           INTERVAL_1_28_TO_US(interval))
+#define GEN9_RP_STATE_LIMITS   _MMIO(0x138148)
 
 /*
  * Logical Context regs
@@ -4325,6 +4312,96 @@ enum {
 #define   EXITLINE_MASK                REG_GENMASK(12, 0)
 #define   EXITLINE_SHIFT       0
 
+/* VRR registers */
+#define _TRANS_VRR_CTL_A               0x60420
+#define _TRANS_VRR_CTL_B               0x61420
+#define _TRANS_VRR_CTL_C               0x62420
+#define _TRANS_VRR_CTL_D               0x63420
+#define TRANS_VRR_CTL(trans)           _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define   VRR_CTL_VRR_ENABLE           REG_BIT(31)
+#define   VRR_CTL_IGN_MAX_SHIFT                REG_BIT(30)
+#define   VRR_CTL_FLIP_LINE_EN         REG_BIT(29)
+#define   VRR_CTL_LINE_COUNT_MASK      REG_GENMASK(10, 3)
+#define   VRR_CTL_SW_FULLLINE_COUNT    REG_BIT(0)
+
+#define _TRANS_VRR_VMAX_A              0x60424
+#define _TRANS_VRR_VMAX_B              0x61424
+#define _TRANS_VRR_VMAX_C              0x62424
+#define _TRANS_VRR_VMAX_D              0x63424
+#define TRANS_VRR_VMAX(trans)          _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define   VRR_VMAX_MASK                        REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_VMIN_A              0x60434
+#define _TRANS_VRR_VMIN_B              0x61434
+#define _TRANS_VRR_VMIN_C              0x62434
+#define _TRANS_VRR_VMIN_D              0x63434
+#define TRANS_VRR_VMIN(trans)          _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define   VRR_VMIN_MASK                        REG_GENMASK(15, 0)
+
+#define _TRANS_VRR_VMAXSHIFT_A         0x60428
+#define _TRANS_VRR_VMAXSHIFT_B         0x61428
+#define _TRANS_VRR_VMAXSHIFT_C         0x62428
+#define _TRANS_VRR_VMAXSHIFT_D         0x63428
+#define TRANS_VRR_VMAXSHIFT(trans)     _MMIO_TRANS2(trans, \
+                                       _TRANS_VRR_VMAXSHIFT_A)
+#define   VRR_VMAXSHIFT_DEC_MASK       REG_GENMASK(29, 16)
+#define   VRR_VMAXSHIFT_DEC            REG_BIT(16)
+#define   VRR_VMAXSHIFT_INC_MASK       REG_GENMASK(12, 0)
+
+#define _TRANS_VRR_STATUS_A            0x6042C
+#define _TRANS_VRR_STATUS_B            0x6142C
+#define _TRANS_VRR_STATUS_C            0x6242C
+#define _TRANS_VRR_STATUS_D            0x6342C
+#define TRANS_VRR_STATUS(trans)                _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define   VRR_STATUS_VMAX_REACHED      REG_BIT(31)
+#define   VRR_STATUS_NOFLIP_TILL_BNDR  REG_BIT(30)
+#define   VRR_STATUS_FLIP_BEF_BNDR     REG_BIT(29)
+#define   VRR_STATUS_NO_FLIP_FRAME     REG_BIT(28)
+#define   VRR_STATUS_VRR_EN_LIVE       REG_BIT(27)
+#define   VRR_STATUS_FLIPS_SERVICED    REG_BIT(26)
+#define   VRR_STATUS_VBLANK_MASK       REG_GENMASK(22, 20)
+#define   STATUS_FSM_IDLE              REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define   STATUS_FSM_WAIT_TILL_FDB     REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define   STATUS_FSM_WAIT_TILL_FS      REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define   STATUS_FSM_WAIT_TILL_FLIP    REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define   STATUS_FSM_PIPELINE_FILL     REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define   STATUS_FSM_ACTIVE            REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define   STATUS_FSM_LEGACY_VBLANK     REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+
+#define _TRANS_VRR_VTOTAL_PREV_A       0x60480
+#define _TRANS_VRR_VTOTAL_PREV_B       0x61480
+#define _TRANS_VRR_VTOTAL_PREV_C       0x62480
+#define _TRANS_VRR_VTOTAL_PREV_D       0x63480
+#define TRANS_VRR_VTOTAL_PREV(trans)   _MMIO_TRANS2(trans, \
+                                       _TRANS_VRR_VTOTAL_PREV_A)
+#define   VRR_VTOTAL_FLIP_BEFR_BNDR    REG_BIT(31)
+#define   VRR_VTOTAL_FLIP_AFTER_BNDR   REG_BIT(30)
+#define   VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define   VRR_VTOTAL_PREV_FRAME_MASK   REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_FLIPLINE_A          0x60438
+#define _TRANS_VRR_FLIPLINE_B          0x61438
+#define _TRANS_VRR_FLIPLINE_C          0x62438
+#define _TRANS_VRR_FLIPLINE_D          0x63438
+#define TRANS_VRR_FLIPLINE(trans)      _MMIO_TRANS2(trans, \
+                                       _TRANS_VRR_FLIPLINE_A)
+#define   VRR_FLIPLINE_MASK            REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_STATUS2_A           0x6043C
+#define _TRANS_VRR_STATUS2_B           0x6143C
+#define _TRANS_VRR_STATUS2_C           0x6243C
+#define _TRANS_VRR_STATUS2_D           0x6343C
+#define TRANS_VRR_STATUS2(trans)       _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define   VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_PUSH_A                  0x60A70
+#define _TRANS_PUSH_B                  0x61A70
+#define _TRANS_PUSH_C                  0x62A70
+#define _TRANS_PUSH_D                  0x63A70
+#define TRANS_PUSH(trans)              _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define   TRANS_PUSH_EN                        REG_BIT(31)
+#define   TRANS_PUSH_SEND              REG_BIT(30)
+
 /*
  * HSW+ eDP PSR registers
  *
@@ -6765,7 +6842,7 @@ enum {
 #define   PLANE_CTL_FORMAT_P012                        (5 << 24)
 #define   PLANE_CTL_FORMAT_XRGB_16161616F      (6 << 24)
 #define   PLANE_CTL_FORMAT_P016                        (7 << 24)
-#define   PLANE_CTL_FORMAT_AYUV                        (8 << 24)
+#define   PLANE_CTL_FORMAT_XYUV                        (8 << 24)
 #define   PLANE_CTL_FORMAT_INDEXED             (12 << 24)
 #define   PLANE_CTL_FORMAT_RGB_565             (14 << 24)
 #define   ICL_PLANE_CTL_FORMAT_MASK            (0x1f << 23)
@@ -8504,6 +8581,7 @@ enum {
 #define  FDI_BC_BIFURCATION_SELECT     (1 << 12)
 #define  CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
 #define  CHASSIS_CLK_REQ_DURATION(x)   ((x) << 8)
+#define  SBCLK_RUN_REFCLK_DIS          (1 << 7)
 #define  SPT_PWM_GRANULARITY           (1 << 0)
 #define SOUTH_CHICKEN2         _MMIO(0xc2004)
 #define  FDI_MPHY_IOSFSB_RESET_STATUS  (1 << 13)
@@ -8995,6 +9073,7 @@ enum {
 #define     GEN7_PCODE_ILLEGAL_DATA            0x3
 #define     GEN11_PCODE_ILLEGAL_SUBCOMMAND     0x4
 #define     GEN11_PCODE_LOCKED                 0x6
+#define     GEN11_PCODE_REJECTED               0x11
 #define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS             0x4
 #define   GEN6_PCODE_READ_RC6VIDS              0x5
@@ -9016,10 +9095,18 @@ enum {
 #define   ICL_PCODE_MEM_SUBSYSYSTEM_INFO       0xd
 #define     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO  (0x0 << 8)
 #define     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point)        (((point) << 16) | (0x1 << 8))
+#define   ICL_PCODE_SAGV_DE_MEM_SS_CONFIG      0xe
+#define     ICL_PCODE_POINTS_RESTRICTED                0x0
+#define     ICL_PCODE_POINTS_RESTRICTED_MASK   0x1
 #define   GEN6_PCODE_READ_D_COMP               0x10
 #define   GEN6_PCODE_WRITE_D_COMP              0x11
+#define   ICL_PCODE_EXIT_TCCOLD                        0x12
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ          0x17
 #define   DISPLAY_IPS_CONTROL                  0x19
+#define   TGL_PCODE_TCCOLD                     0x26
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED   REG_BIT(0)
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ     0
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ   REG_BIT(0)
             /* See also IPS_CTL */
 #define     IPS_PCODE_CONTROL                  (1 << 30)
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
@@ -9306,6 +9393,22 @@ enum {
 #define AUD_PIN_BUF_CTL                _MMIO(0x48414)
 #define   AUD_PIN_BUF_ENABLE           REG_BIT(31)
 
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE                  _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe)          (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe)          (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe)          (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val)          (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe)     (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val)     (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8   0
+#define HBLANK_START_COUNT_16  1
+#define HBLANK_START_COUNT_32  2
+#define HBLANK_START_COUNT_64  3
+#define HBLANK_START_COUNT_96  4
+#define HBLANK_START_COUNT_128 5
+
 /*
  * HSW - ICL power wells
  *
@@ -9701,8 +9804,11 @@ enum skl_power_gate {
 #define  TRANS_DDI_BPC_10              (1 << 20)
 #define  TRANS_DDI_BPC_6               (2 << 20)
 #define  TRANS_DDI_BPC_12              (3 << 20)
+#define  TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK        REG_GENMASK(19, 18) /* bdw-cnl */
+#define  TRANS_DDI_PORT_SYNC_MASTER_SELECT(x)  REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
 #define  TRANS_DDI_PVSYNC              (1 << 17)
 #define  TRANS_DDI_PHSYNC              (1 << 16)
+#define  TRANS_DDI_PORT_SYNC_ENABLE    REG_BIT(15) /* bdw-cnl */
 #define  TRANS_DDI_EDP_INPUT_MASK      (7 << 12)
 #define  TRANS_DDI_EDP_INPUT_A_ON      (0 << 12)
 #define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4 << 12)
@@ -9729,12 +9835,10 @@ enum skl_power_gate {
 #define _TRANS_DDI_FUNC_CTL2_EDP       0x6f404
 #define _TRANS_DDI_FUNC_CTL2_DSI0      0x6b404
 #define _TRANS_DDI_FUNC_CTL2_DSI1      0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
-                                                    _TRANS_DDI_FUNC_CTL2_A)
-#define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
-#define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
-#define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
+#define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define  PORT_SYNC_MODE_ENABLE                 REG_BIT(4)
+#define  PORT_SYNC_MODE_MASTER_SELECT_MASK     REG_GENMASK(2, 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
 
 /* DisplayPort Transport Control */
 #define _DP_TP_CTL_A                   0x64040
@@ -9795,6 +9899,24 @@ enum skl_power_gate {
 #define  DDI_BUF_BALANCE_LEG_ENABLE    (1 << 31)
 #define DDI_BUF_TRANS_HI(port, i)      _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
 
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A                     0x605F0
+#define _DDI_DP_COMP_CTL_B                     0x615F0
+#define DDI_DP_COMP_CTL(pipe)                  _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define   DDI_DP_COMP_CTL_ENABLE               (1 << 31)
+#define   DDI_DP_COMP_CTL_D10_2                        (0 << 28)
+#define   DDI_DP_COMP_CTL_SCRAMBLED_0          (1 << 28)
+#define   DDI_DP_COMP_CTL_PRBS7                        (2 << 28)
+#define   DDI_DP_COMP_CTL_CUSTOM80             (3 << 28)
+#define   DDI_DP_COMP_CTL_HBR2                 (4 << 28)
+#define   DDI_DP_COMP_CTL_SCRAMBLED_1          (5 << 28)
+#define   DDI_DP_COMP_CTL_HBR2_RESET           (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A                     0x605F4
+#define _DDI_DP_COMP_PAT_B                     0x615F4
+#define DDI_DP_COMP_PAT(pipe, i)               _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
  * which contains the payload */
@@ -10742,6 +10864,12 @@ enum skl_power_gate {
 
 #define _PAL_PREC_MULTI_SEG_DATA_A     0x4A40C
 #define _PAL_PREC_MULTI_SEG_DATA_B     0x4AC0C
+#define  PAL_PREC_MULTI_SEG_RED_LDW_MASK   REG_GENMASK(29, 24)
+#define  PAL_PREC_MULTI_SEG_RED_UDW_MASK   REG_GENMASK(29, 20)
+#define  PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14)
+#define  PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10)
+#define  PAL_PREC_MULTI_SEG_BLUE_LDW_MASK  REG_GENMASK(9, 4)
+#define  PAL_PREC_MULTI_SEG_BLUE_UDW_MASK  REG_GENMASK(9, 0)
 
 #define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
                                        _PAL_PREC_MULTI_SEG_INDEX_A, \
index e2b78db685eaecb9599dbd5b2c28f60a104e51e0..526c1e9acbd587117fcf0aa18ce81975d6076187 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
 #include <linux/irq_work.h>
 #include <linux/prefetch.h>
 #include <linux/sched.h>
@@ -101,6 +102,11 @@ static signed long i915_fence_wait(struct dma_fence *fence,
                                 timeout);
 }
 
+struct kmem_cache *i915_request_slab_cache(void)
+{
+       return global.slab_requests;
+}
+
 static void i915_fence_release(struct dma_fence *fence)
 {
        struct i915_request *rq = to_request(fence);
@@ -115,6 +121,10 @@ static void i915_fence_release(struct dma_fence *fence)
        i915_sw_fence_fini(&rq->submit);
        i915_sw_fence_fini(&rq->semaphore);
 
+       /* Keep one request on each engine for reserved use under mempressure */
+       if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+               return;
+
        kmem_cache_free(global.slab_requests, rq);
 }
 
@@ -358,8 +368,6 @@ __await_execution(struct i915_request *rq,
        }
        spin_unlock_irq(&signal->lock);
 
-       /* Copy across semaphore status as we need the same behaviour */
-       rq->sched.flags |= signal->sched.flags;
        return 0;
 }
 
@@ -527,10 +535,8 @@ void __i915_request_unsubmit(struct i915_request *request)
        spin_unlock(&request->lock);
 
        /* We've already spun, don't charge on resubmitting. */
-       if (request->sched.semaphores && i915_request_started(request)) {
-               request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+       if (request->sched.semaphores && i915_request_started(request))
                request->sched.semaphores = 0;
-       }
 
        /*
         * We don't need to wake_up any waiters on request->execute, they
@@ -588,15 +594,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
 
-static void irq_semaphore_cb(struct irq_work *wrk)
-{
-       struct i915_request *rq =
-               container_of(wrk, typeof(*rq), semaphore_work);
-
-       i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
-       i915_request_put(rq);
-}
-
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
@@ -604,11 +601,6 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
        switch (state) {
        case FENCE_COMPLETE:
-               if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
-                       i915_request_get(rq);
-                       init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
-                       irq_work_queue(&rq->semaphore_work);
-               }
                break;
 
        case FENCE_FREE:
@@ -629,14 +621,22 @@ static void retire_requests(struct intel_timeline *tl)
 }
 
 static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+                  struct i915_request **rsvd,
+                  gfp_t gfp)
 {
        struct i915_request *rq;
 
-       if (list_empty(&tl->requests))
-               goto out;
+       /* If we cannot wait, dip into our reserves */
+       if (!gfpflags_allow_blocking(gfp)) {
+               rq = xchg(rsvd, NULL);
+               if (!rq) /* Use the normal failure path for one final WARN */
+                       goto out;
 
-       if (!gfpflags_allow_blocking(gfp))
+               return rq;
+       }
+
+       if (list_empty(&tl->requests))
                goto out;
 
        /* Move our oldest request to the slab-cache (if not in use!) */
@@ -721,7 +721,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq = kmem_cache_alloc(global.slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
-               rq = request_alloc_slow(tl, gfp);
+               rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
                if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
@@ -933,6 +933,7 @@ __emit_semaphore_wait(struct i915_request *to,
        u32 *cs;
 
        GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+       GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
 
        /* We need to pin the signaler's HWSP until we are finished reading. */
        err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
@@ -978,13 +979,26 @@ emit_semaphore_wait(struct i915_request *to,
                    gfp_t gfp)
 {
        const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+       struct i915_sw_fence *wait = &to->submit;
 
        if (!intel_context_use_semaphores(to->context))
                goto await_fence;
 
+       if (i915_request_has_initial_breadcrumb(to))
+               goto await_fence;
+
        if (!rcu_access_pointer(from->hwsp_cacheline))
                goto await_fence;
 
+       /*
+        * If this or its dependents are waiting on an external fence
+        * that may fail catastrophically, then we want to avoid using
+        * sempahores as they bypass the fence signaling metadata, and we
+        * lose the fence->error propagation.
+        */
+       if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
+               goto await_fence;
+
        /* Just emit the first semaphore we see as request space is limited. */
        if (already_busywaiting(to) & mask)
                goto await_fence;
@@ -1000,11 +1014,10 @@ emit_semaphore_wait(struct i915_request *to,
                goto await_fence;
 
        to->sched.semaphores |= mask;
-       to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-       return 0;
+       wait = &to->semaphore;
 
 await_fence:
-       return i915_sw_fence_await_dma_fence(&to->submit,
+       return i915_sw_fence_await_dma_fence(wait,
                                             &from->fence, 0,
                                             I915_FENCE_GFP);
 }
@@ -1039,15 +1052,56 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
        if (ret < 0)
                return ret;
 
-       if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
-               ret = i915_sw_fence_await_dma_fence(&to->semaphore,
-                                                   &from->fence, 0,
-                                                   I915_FENCE_GFP);
-               if (ret < 0)
-                       return ret;
+       return 0;
+}
+
+static void mark_external(struct i915_request *rq)
+{
+       /*
+        * The downside of using semaphores is that we lose metadata passing
+        * along the signaling chain. This is particularly nasty when we
+        * need to pass along a fatal error such as EFAULT or EDEADLK. For
+        * fatal errors we want to scrub the request before it is executed,
+        * which means that we cannot preload the request onto HW and have
+        * it wait upon a semaphore.
+        */
+       rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
+}
+
+static int
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+       mark_external(rq);
+       return i915_sw_fence_await_dma_fence(&rq->submit, fence,
+                                            i915_fence_context_timeout(rq->i915,
+                                                                       fence->context),
+                                            I915_FENCE_GFP);
+}
+
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+       struct dma_fence *iter;
+       int err = 0;
+
+       if (!to_dma_fence_chain(fence))
+               return __i915_request_await_external(rq, fence);
+
+       dma_fence_chain_for_each(iter, fence) {
+               struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+               if (!dma_fence_is_i915(chain->fence)) {
+                       err = __i915_request_await_external(rq, iter);
+                       break;
+               }
+
+               err = i915_request_await_dma_fence(rq, chain->fence);
+               if (err < 0)
+                       break;
        }
 
-       return 0;
+       dma_fence_put(iter);
+       return err;
 }
 
 int
@@ -1097,9 +1151,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
                if (dma_fence_is_i915(fence))
                        ret = i915_request_await_request(rq, to_request(fence));
                else
-                       ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
-                                                           fence->context ? I915_FENCE_TIMEOUT : 0,
-                                                           I915_FENCE_GFP);
+                       ret = i915_request_await_external(rq, fence);
                if (ret < 0)
                        return ret;
 
@@ -1179,7 +1231,8 @@ __i915_request_await_execution(struct i915_request *to,
         * immediate execution, and so we must wait until it reaches the
         * active slot.
         */
-       if (intel_engine_has_semaphores(to->engine)) {
+       if (intel_engine_has_semaphores(to->engine) &&
+           !i915_request_has_initial_breadcrumb(to)) {
                err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
                if (err < 0)
                        return err;
@@ -1225,6 +1278,9 @@ i915_request_await_execution(struct i915_request *rq,
                        continue;
                }
 
+               if (fence->context == rq->fence.context)
+                       continue;
+
                /*
                 * We don't squash repeated fence dependencies here as we
                 * want to run our callback in all cases.
@@ -1235,9 +1291,7 @@ i915_request_await_execution(struct i915_request *rq,
                                                             to_request(fence),
                                                             hook);
                else
-                       ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
-                                                           I915_FENCE_TIMEOUT,
-                                                           GFP_KERNEL);
+                       ret = i915_request_await_external(rq, fence);
                if (ret < 0)
                        return ret;
        } while (--nchild);
@@ -1445,14 +1499,7 @@ void i915_request_add(struct i915_request *rq)
                attr = ctx->sched;
        rcu_read_unlock();
 
-       if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
-               attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-       if (list_empty(&rq->sched.signalers_list))
-               attr.priority |= I915_PRIORITY_WAIT;
-
-       local_bh_disable();
        __i915_request_queue(rq, &attr);
-       local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 
        mutex_unlock(&tl->mutex);
 }
@@ -1636,7 +1683,6 @@ long i915_request_wait(struct i915_request *rq,
        if (flags & I915_WAIT_PRIORITY) {
                if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
                        intel_rps_boost(rq);
-               i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
        }
 
        wait.tsk = current;
index 3c552bfea67a68eba098524687572250ce4f34e9..8ec7ee4dbadc9ba4f32301eedb363715ec3ae32b 100644 (file)
@@ -84,19 +84,26 @@ enum {
        I915_FENCE_FLAG_PQUEUE,
 
        /*
-        * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+        * I915_FENCE_FLAG_HOLD - this request is currently on hold
         *
-        * Internal bookkeeping used by the breadcrumb code to track when
-        * a request is on the various signal_list.
+        * This request has been suspended, pending an ongoing investigation.
         */
-       I915_FENCE_FLAG_SIGNAL,
+       I915_FENCE_FLAG_HOLD,
 
        /*
-        * I915_FENCE_FLAG_HOLD - this request is currently on hold
+        * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
+        * breadcrumb that marks the end of semaphore waits and start of the
+        * user payload.
+        */
+       I915_FENCE_FLAG_INITIAL_BREADCRUMB,
+
+       /*
+        * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
         *
-        * This request has been suspended, pending an ongoing investigation.
+        * Internal bookkeeping used by the breadcrumb code to track when
+        * a request is on the various signal_list.
         */
-       I915_FENCE_FLAG_HOLD,
+       I915_FENCE_FLAG_SIGNAL,
 
        /*
         * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
@@ -209,7 +216,6 @@ struct i915_request {
        };
        struct list_head execute_cb;
        struct i915_sw_fence semaphore;
-       struct irq_work semaphore_work;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
@@ -300,6 +306,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
        return fence->ops == &i915_fence_ops;
 }
 
+struct kmem_cache *i915_request_slab_cache(void);
+
 struct i915_request * __must_check
 __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
@@ -388,6 +396,12 @@ static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
        return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
+static inline bool
+i915_request_has_initial_breadcrumb(const struct i915_request *rq)
+{
+       return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
index f0a9e8958ca0d64fb73fb1219e86e8607c779b12..f4ea318781f0f541e87c379350021719fb31e14d 100644 (file)
@@ -51,11 +51,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));
 
-       last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
+       last_prio = INT_MAX;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
-               GEM_BUG_ON(p->priority >= last_prio);
+               GEM_BUG_ON(p->priority > last_prio);
                last_prio = p->priority;
 
                GEM_BUG_ON(!p->used);
@@ -174,7 +174,7 @@ sched_lock_engine(const struct i915_sched_node *node,
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-       return rq->sched.attr.priority | __NO_PREEMPTION;
+       return rq->sched.attr.priority;
 }
 
 static inline bool need_preempt(int prio, int active)
@@ -209,6 +209,12 @@ static void kick_submission(struct intel_engine_cs *engine,
        if (!inflight)
                goto unlock;
 
+       ENGINE_TRACE(engine,
+                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+                    prio,
+                    rq->fence.context, rq->fence.seqno,
+                    inflight->fence.context, inflight->fence.seqno,
+                    inflight->sched.attr.priority);
        engine->execlists.queue_priority_hint = prio;
 
        /*
@@ -428,25 +434,12 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                dep->waiter = node;
                dep->flags = flags;
 
-               /* Keep track of whether anyone on this chain has a semaphore */
-               if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
-                   !node_started(signal))
-                       node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
                /* All set, now publish. Beware the lockless walkers. */
                list_add_rcu(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);
 
-               /*
-                * As we do not allow WAIT to preempt inflight requests,
-                * once we have executed a request, along with triggering
-                * any execution callbacks, we must preserve its ordering
-                * within the non-preemptible FIFO.
-                */
-               BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
-               if (flags & I915_DEPENDENCY_EXTERNAL)
-                       __bump_priority(signal, __NO_PREEMPTION);
-
+               /* Propagate the chains */
+               node->flags |= signal->flags;
                ret = true;
        }
 
@@ -465,10 +458,14 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
        if (!dep)
                return -ENOMEM;
 
+       local_bh_disable();
+
        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              flags | I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);
 
+       local_bh_enable(); /* kick submission tasklet */
+
        return 0;
 }
 
index 7186875088a0a285c1f05a13df5846db478f4162..f72e6c397b088889ae8072a35c5236a0d58427e1 100644 (file)
@@ -65,7 +65,7 @@ struct i915_sched_node {
        struct list_head link;
        struct i915_sched_attr attr;
        unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+#define I915_SCHED_HAS_EXTERNAL_CHAIN  BIT(0)
        intel_engine_mask_t semaphores;
 };
 
index 98bcb6fa0ab48af79056efb1d4ec31543de2a17a..d53d207ab6eb6de78217b1bd13f379df004cd6ed 100644 (file)
@@ -133,4 +133,6 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
 #define igt_timeout(t, fmt, ...) \
        __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 
+void igt_hexdump(const void *buf, size_t len);
+
 #endif /* !__I915_SELFTEST_H__ */
index a3d38e089b6ee2f4e9f5bf8de92df87a9f01451d..295b9829e2da519f8ff2933305dc76c2d7898a2a 100644 (file)
@@ -421,7 +421,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
        if (!fence)
                return;
 
-       pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
+       pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
                  cb->dma->ops->get_driver_name(cb->dma),
                  cb->dma->ops->get_timeline_name(cb->dma),
                  cb->dma->seqno,
@@ -546,13 +546,11 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
        cb->fence = fence;
        i915_sw_fence_await(fence);
 
-       ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
-       if (ret == 0) {
-               ret = 1;
-       } else {
+       ret = 1;
+       if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
+               /* fence already signaled */
                __dma_i915_sw_fence_wake(dma, &cb->base);
-               if (ret == -ENOENT) /* fence already signaled */
-                       ret = 0;
+               ret = 0;
        }
 
        return ret;
index 997b2998f1f2158ea53e4b202f55154ecce5cbb4..a3a81bb8f2c36ffb97bb2b99ad4510ffbdc7258a 100644 (file)
@@ -38,7 +38,10 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
                if (!f->dma.error) {
                        dma_fence_get(&f->dma);
-                       queue_work(system_unbound_wq, &f->work);
+                       if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+                               fence_work(&f->work);
+                       else
+                               queue_work(system_unbound_wq, &f->work);
                } else {
                        fence_complete(f);
                }
index 3a22b287e2019129a7379929a912f5cc3c87455e..2c409f11c5c5979931ba119e8044e8db0c460888 100644 (file)
@@ -32,6 +32,10 @@ struct dma_fence_work {
        const struct dma_fence_work_ops *ops;
 };
 
+enum {
+       DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
+};
+
 void dma_fence_work_init(struct dma_fence_work *f,
                         const struct dma_fence_work_ops *ops);
 int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
@@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f)
        i915_sw_fence_commit(&f->chain);
 }
 
+/**
+ * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
+ * @f: the fenced worker
+ *
+ * Instead of always scheduling a worker to execute the callback (see
+ * dma_fence_work_commit()), we try to execute the callback immediately in
+ * the local context. It is required that the fence be committed before it
+ * is published, and that no other threads try to tamper with the number
+ * of asynchronous waits on the fence (or else the callback will be
+ * executed in the wrong context, i.e. not the callers).
+ */
+static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
+{
+       if (atomic_read(&f->chain.pending) <= 1)
+               __set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
+
+       dma_fence_work_commit(f);
+}
+
 #endif /* I915_SW_FENCE_WORK_H */
index ed69b5d4a3753031e7a735fd4e1c88ae39569574..b3a24eac21f16e035283de091c372c97793bc0ee 100644 (file)
@@ -20,14 +20,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
        }
 
        if (state == VGA_SWITCHEROO_ON) {
-               pr_info("switched on\n");
+               drm_info(&i915->drm, "switched on\n");
                i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(pdev, PCI_D0);
                i915_resume_switcheroo(i915);
                i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
-               pr_info("switched off\n");
+               drm_info(&i915->drm, "switched off\n");
                i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_switcheroo(i915, pmm);
                i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
index 029854ae65fc781918df0bf65e83c5acb368009f..e28eae4a8f70626b39774d28ce718255e2f85455 100644 (file)
@@ -101,5 +101,6 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
         */
        barrier();
 
-       mod_timer(t, jiffies + timeout);
+       /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+       mod_timer(t, jiffies + timeout ?: 1);
 }
index 2cd7a7e87c0a750ff87db98f9bfce927af88dda6..fc14ebf9a0b709ce029d5409016954156184e537 100644 (file)
@@ -522,7 +522,6 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
        GEM_BUG_ON(!obj);
 
        i915_vma_unpin(vma);
-       i915_vma_close(vma);
 
        if (flags & I915_VMA_RELEASE_MAP)
                i915_gem_object_unpin_map(obj);
@@ -610,18 +609,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
        return true;
 }
 
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
-       /*
-        * Combine the assertion that the object is bound and that we have
-        * pinned its pages. But we should never have bound the object
-        * more than we have pinned its pages. (For complete accuracy, we
-        * assume that no else is pinning the pages, but as a rough assertion
-        * that we will not run into problems later, this will do!)
-        */
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
 /**
  * i915_vma_insert - finds a slot for the vma in its address space
  * @vma: the vma
@@ -740,12 +727,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
-       if (vma->obj) {
-               struct drm_i915_gem_object *obj = vma->obj;
-
-               atomic_inc(&obj->bind_count);
-               assert_bind_count(obj);
-       }
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 
        return 0;
@@ -763,12 +744,6 @@ i915_vma_detach(struct i915_vma *vma)
         * it to be reaped by the shrinker.
         */
        list_del(&vma->vm_link);
-       if (vma->obj) {
-               struct drm_i915_gem_object *obj = vma->obj;
-
-               assert_bind_count(obj);
-               atomic_dec(&obj->bind_count);
-       }
 }
 
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -915,11 +890,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        if (flags & PIN_GLOBAL)
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
-       /* No more allocations allowed once we hold vm->mutex */
-       err = mutex_lock_interruptible(&vma->vm->mutex);
+       /*
+        * Differentiate between user/kernel vma inside the aliasing-ppgtt.
+        *
+        * We conflate the Global GTT with the user's vma when using the
+        * aliasing-ppgtt, but it is still vitally important to try and
+        * keep the use cases distinct. For example, userptr objects are
+        * not allowed inside the Global GTT as that will cause lock
+        * inversions when we have to evict them the mmu_notifier callbacks -
+        * but they are allowed to be part of the user ppGTT which can never
+        * be mapped. As such we try to give the distinct users of the same
+        * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
+        * and i915_ppgtt separate].
+        *
+        * NB this may cause us to mask real lock inversions -- while the
+        * code is safe today, lockdep may not be able to spot future
+        * transgressions.
+        */
+       err = mutex_lock_interruptible_nested(&vma->vm->mutex,
+                                             !(flags & PIN_GLOBAL));
        if (err)
                goto err_fence;
 
+       /* No more allocations allowed now we hold vm->mutex */
+
        if (unlikely(i915_vma_is_closed(vma))) {
                err = -ENOENT;
                goto err_unlock;
@@ -982,7 +976,7 @@ err_unlock:
        mutex_unlock(&vma->vm->mutex);
 err_fence:
        if (work)
-               dma_fence_work_commit(&work->base);
+               dma_fence_work_commit_imm(&work->base);
        if (wakeref)
                intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 err_pages:
@@ -1028,13 +1022,8 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
        } while (1);
 }
 
-void i915_vma_close(struct i915_vma *vma)
+static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
 {
-       struct intel_gt *gt = vma->vm->gt;
-       unsigned long flags;
-
-       GEM_BUG_ON(i915_vma_is_closed(vma));
-
        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
@@ -1047,9 +1036,25 @@ void i915_vma_close(struct i915_vma *vma)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
-       spin_lock_irqsave(&gt->closed_lock, flags);
+       GEM_BUG_ON(i915_vma_is_closed(vma));
        list_add(&vma->closed_link, &gt->closed_vma);
-       spin_unlock_irqrestore(&gt->closed_lock, flags);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+       struct intel_gt *gt = vma->vm->gt;
+       unsigned long flags;
+
+       if (i915_vma_is_ggtt(vma))
+               return;
+
+       GEM_BUG_ON(!atomic_read(&vma->open_count));
+       if (atomic_dec_and_lock_irqsave(&vma->open_count,
+                                       &gt->closed_lock,
+                                       flags)) {
+               __vma_close(vma, gt);
+               spin_unlock_irqrestore(&gt->closed_lock, flags);
+       }
 }
 
 static void __i915_vma_remove_closed(struct i915_vma *vma)
@@ -1174,7 +1179,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
        /* Wait for the vma to be bound before we start! */
-       err = i915_request_await_active(rq, &vma->active, 0);
+       err = i915_request_await_active(rq, &vma->active,
+                                       I915_ACTIVE_AWAIT_EXCL);
        if (err)
                return err;
 
@@ -1215,6 +1221,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                dma_resv_add_shared_fence(vma->resv, &rq->fence);
                obj->write_domain = 0;
        }
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+               i915_active_add_request(&vma->fence->active, rq);
+
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;
 
@@ -1249,6 +1259,9 @@ int __i915_vma_unbind(struct i915_vma *vma)
        GEM_BUG_ON(i915_vma_is_active(vma));
 
        if (i915_vma_is_map_and_fenceable(vma)) {
+               /* Force a pagefault for domain tracking on next user access */
+               i915_vma_revoke_mmap(vma);
+
                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind, other due to non-strict nature of those
@@ -1265,12 +1278,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
                i915_vma_flush_writes(vma);
 
                /* release the fence reg _after_ flushing */
-               ret = i915_vma_revoke_fence(vma);
-               if (ret)
-                       return ret;
-
-               /* Force a pagefault for domain tracking on next user access */
-               i915_vma_revoke_mmap(vma);
+               i915_vma_revoke_fence(vma);
 
                __i915_vma_iounmap(vma);
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -1315,7 +1323,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                /* XXX not always required: nop_clear_range */
                wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
 
-       err = mutex_lock_interruptible(&vm->mutex);
+       err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
        if (err)
                goto out_rpm;
 
index e1ced1df13e1c7bffb0a1980eaf09b684e01f48e..8ad1daabcd58bbb1ec37be60b3a207f5df223104 100644 (file)
 
 #include <drm/drm_mm.h>
 
+#include "gt/intel_ggtt_fencing.h"
 #include "gem/i915_gem_object.h"
 
 #include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
 
 #include "i915_active.h"
 #include "i915_request.h"
@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
  * True if the vma has a fence, false otherwise.
  */
 int __must_check i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
 
 int __i915_vma_pin_fence(struct i915_vma *vma);
 
index d7fe12734db889eb0f9708fc949c7e1283d8cff8..8a635bd4d5d8a01c0795591f1d28309cdde230c3 100644 (file)
@@ -98,6 +98,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
        drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
        drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
        drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+       drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
 
 #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -135,8 +136,8 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
        sseu_dump(&info->sseu, p);
 
        drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
-       drm_printf(p, "CS timestamp frequency: %u kHz\n",
-                  info->cs_timestamp_frequency_khz);
+       drm_printf(p, "CS timestamp frequency: %u Hz\n",
+                  info->cs_timestamp_frequency_hz);
 }
 
 static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
@@ -677,12 +678,12 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
 
        base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
                     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
-       base_freq *= 1000;
+       base_freq *= 1000000;
 
        frac_freq = ((ts_override &
                      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
                     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
-       frac_freq = 1000 / (frac_freq + 1);
+       frac_freq = 1000000 / (frac_freq + 1);
 
        return base_freq + frac_freq;
 }
@@ -690,8 +691,8 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
 static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
                                        u32 rpm_config_reg)
 {
-       u32 f19_2_mhz = 19200;
-       u32 f24_mhz = 24000;
+       u32 f19_2_mhz = 19200000;
+       u32 f24_mhz = 24000000;
        u32 crystal_clock = (rpm_config_reg &
                             GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
                            GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
@@ -710,10 +711,10 @@ static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
 static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
                                        u32 rpm_config_reg)
 {
-       u32 f19_2_mhz = 19200;
-       u32 f24_mhz = 24000;
-       u32 f25_mhz = 25000;
-       u32 f38_4_mhz = 38400;
+       u32 f19_2_mhz = 19200000;
+       u32 f24_mhz = 24000000;
+       u32 f25_mhz = 25000000;
+       u32 f38_4_mhz = 38400000;
        u32 crystal_clock = (rpm_config_reg &
                             GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
                            GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
@@ -735,9 +736,9 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
 
 static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 {
-       u32 f12_5_mhz = 12500;
-       u32 f19_2_mhz = 19200;
-       u32 f24_mhz = 24000;
+       u32 f12_5_mhz = 12500000;
+       u32 f19_2_mhz = 19200000;
+       u32 f24_mhz = 24000000;
 
        if (INTEL_GEN(dev_priv) <= 4) {
                /* PRMs say:
@@ -746,7 +747,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
                 *      hclks." (through the “Clocking Configuration”
                 *      (“CLKCFG”) MCHBAR register)
                 */
-               return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
+               return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
        } else if (INTEL_GEN(dev_priv) <= 8) {
                /* PRMs say:
                 *
@@ -980,35 +981,32 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        drm_info(&dev_priv->drm,
                                 "Display fused off, disabling\n");
                        info->pipe_mask = 0;
+                       info->cpu_transcoder_mask = 0;
                } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
                        drm_info(&dev_priv->drm, "PipeC fused off\n");
                        info->pipe_mask &= ~BIT(PIPE_C);
+                       info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
        } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
                u32 dfsm = I915_READ(SKL_DFSM);
-               u8 enabled_mask = info->pipe_mask;
-
-               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
-                       enabled_mask &= ~BIT(PIPE_A);
-               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
-                       enabled_mask &= ~BIT(PIPE_B);
-               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
-                       enabled_mask &= ~BIT(PIPE_C);
-               if (INTEL_GEN(dev_priv) >= 12 &&
-                   (dfsm & TGL_DFSM_PIPE_D_DISABLE))
-                       enabled_mask &= ~BIT(PIPE_D);
 
-               /*
-                * At least one pipe should be enabled and if there are
-                * disabled pipes, they should be the last ones, with no holes
-                * in the mask.
-                */
-               if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
-                       drm_err(&dev_priv->drm,
-                               "invalid pipe fuse configuration: enabled_mask=0x%x\n",
-                               enabled_mask);
-               else
-                       info->pipe_mask = enabled_mask;
+               if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+                       info->pipe_mask &= ~BIT(PIPE_A);
+                       info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+               }
+               if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+                       info->pipe_mask &= ~BIT(PIPE_B);
+                       info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+               }
+               if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+                       info->pipe_mask &= ~BIT(PIPE_C);
+                       info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+               }
+               if (INTEL_GEN(dev_priv) >= 12 &&
+                   (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+                       info->pipe_mask &= ~BIT(PIPE_D);
+                       info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+               }
 
                if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
                        info->display.has_hdcp = 0;
@@ -1050,11 +1048,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
 
        /* Initialize command stream timestamp frequency */
-       runtime->cs_timestamp_frequency_khz =
+       runtime->cs_timestamp_frequency_hz =
                read_timestamp_frequency(dev_priv);
-       if (runtime->cs_timestamp_frequency_khz) {
+       if (runtime->cs_timestamp_frequency_hz) {
                runtime->cs_timestamp_period_ns =
-                       div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+                       i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
                drm_dbg(&dev_priv->drm,
                        "CS timestamp wraparound in %lldms\n",
                        div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
index 1ecb9df2de9178e04e2ee8266c6606e3d1d47d23..62e03ffa377e68cc59cc2264bb6663bb8fa8e7aa 100644 (file)
@@ -158,6 +158,8 @@ struct intel_device_info {
 
        enum intel_platform platform;
 
+       unsigned int dma_mask_size; /* available DMA address bits */
+
        enum intel_ppgtt_type ppgtt_type;
        unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
 
@@ -168,6 +170,7 @@ struct intel_device_info {
        u32 display_mmio_offset;
 
        u8 pipe_mask;
+       u8 cpu_transcoder_mask;
 
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -218,7 +221,7 @@ struct intel_runtime_info {
 
        u32 rawclk_freq;
 
-       u32 cs_timestamp_frequency_khz;
+       u32 cs_timestamp_frequency_hz;
        u32 cs_timestamp_period_ns;
 
        /* Media engine access to SFC per instance */
index 6b922efb1d7c68dc22532850e322c83f8cafca07..8aa12cad93ce370682c66760331dea30b481e829 100644 (file)
@@ -495,6 +495,5 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
        else
                i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
 
-       dev_info(i915->drm.dev,
-                "Found %uMB of eDRAM\n", i915->edram_size_mb);
+       drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
 }
index a52986a9e7a68245f670746e1e3ba32ad90db042..696491d71a1d1a6765badf57c3bd0db6b14d11e2 100644 (file)
@@ -43,6 +43,7 @@
 #include "i915_fixed.h"
 #include "i915_irq.h"
 #include "i915_trace.h"
+#include "display/intel_bw.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
 #include "../../../platform/x86/intel_ips.h"
@@ -3637,10 +3638,6 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
-       /* HACK! */
-       if (IS_GEN(dev_priv, 12))
-               return false;
-
        return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
                dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 }
@@ -3757,42 +3754,120 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-bool intel_can_enable_sagv(struct intel_atomic_state *state)
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
 {
-       struct drm_device *dev = state->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc;
-       struct intel_plane *plane;
-       struct intel_crtc_state *crtc_state;
-       enum pipe pipe;
-       int level, latency;
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       const struct intel_bw_state *new_bw_state;
+       const struct intel_bw_state *old_bw_state;
+       u32 new_mask = 0;
 
+       /*
+        * Just return if we can't control SAGV or don't have it.
+        * This is different from the situation where we have SAGV but just
+        * can't afford it due to DBuf limitations: if SAGV is completely
+        * disabled in the BIOS, we are not even allowed to send a PCode
+        * request, as it will throw an error. So we have to check it here.
+        */
        if (!intel_has_sagv(dev_priv))
-               return false;
+               return;
+
+       new_bw_state = intel_atomic_get_new_bw_state(state);
+       if (!new_bw_state)
+               return;
 
+       if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
+               intel_disable_sagv(dev_priv);
+               return;
+       }
+
+       old_bw_state = intel_atomic_get_old_bw_state(state);
        /*
-        * If there are no active CRTCs, no additional checks need be performed
+        * Nothing to mask
         */
-       if (hweight8(state->active_pipes) == 0)
-               return true;
+       if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+               return;
+
+       new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+       /*
+        * If the new mask is zero, there is nothing to mask; we can only
+        * unmask, which is handled in intel_sagv_post_plane_update().
+        */
+       if (!new_mask)
+               return;
+
+       /*
+        * Restrict required qgv points before updating the configuration.
+        * According to BSpec we can't mask and unmask qgv points at the same
+        * time. Also masking should be done before updating the configuration
+        * and unmasking afterwards.
+        */
+       icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       const struct intel_bw_state *new_bw_state;
+       const struct intel_bw_state *old_bw_state;
+       u32 new_mask = 0;
+
+       /*
+        * Just return if we can't control SAGV or don't have it.
+        * This is different from the situation where we have SAGV but just
+        * can't afford it due to DBuf limitations: if SAGV is completely
+        * disabled in the BIOS, we are not even allowed to send a PCode
+        * request, as it will throw an error. So we have to check it here.
+        */
+       if (!intel_has_sagv(dev_priv))
+               return;
+
+       new_bw_state = intel_atomic_get_new_bw_state(state);
+       if (!new_bw_state)
+               return;
+
+       if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
+               intel_enable_sagv(dev_priv);
+               return;
+       }
+
+       old_bw_state = intel_atomic_get_old_bw_state(state);
+       /*
+        * Nothing to unmask
+        */
+       if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+               return;
+
+       new_mask = new_bw_state->qgv_points_mask;
 
        /*
-        * SKL+ workaround: bspec recommends we disable SAGV when we have
-        * more then one pipe enabled
+        * Allow required qgv points after updating the configuration.
+        * According to BSpec we can't mask and unmask qgv points at the same
+        * time. Also masking should be done before updating the configuration
+        * and unmasking afterwards.
         */
-       if (hweight8(state->active_pipes) > 1)
+       icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_plane *plane;
+       const struct intel_plane_state *plane_state;
+       int level, latency;
+
+       if (!intel_has_sagv(dev_priv))
                return false;
 
-       /* Since we're now guaranteed to only have one active CRTC... */
-       pipe = ffs(state->active_pipes) - 1;
-       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       crtc_state = to_intel_crtc_state(crtc->base.state);
+       if (!crtc_state->hw.active)
+               return true;
 
        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                return false;
 
-       for_each_intel_plane_on_crtc(dev, crtc, plane) {
-               struct skl_plane_wm *wm =
+       intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+               const struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane->id];
 
                /* Skip this plane if it's not enabled */
@@ -3807,7 +3882,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
                latency = dev_priv->wm.skl_latency[level];
 
                if (skl_needs_memory_bw_wa(dev_priv) &&
-                   plane->base.state->fb->modifier ==
+                   plane_state->uapi.fb->modifier ==
                    I915_FORMAT_MOD_X_TILED)
                        latency += 15;
 
@@ -3823,6 +3898,112 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
        return true;
 }
 
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       enum plane_id plane_id;
+
+       if (!crtc_state->hw.active)
+               return true;
+
+       for_each_plane_id_on_crtc(crtc, plane_id) {
+               const struct skl_ddb_entry *plane_alloc =
+                       &crtc_state->wm.skl.plane_ddb_y[plane_id];
+               const struct skl_plane_wm *wm =
+                       &crtc_state->wm.skl.optimal.planes[plane_id];
+
+               if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (INTEL_GEN(dev_priv) >= 12)
+               return tgl_crtc_can_enable_sagv(crtc_state);
+       else
+               return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+                          const struct intel_bw_state *bw_state)
+{
+       if (INTEL_GEN(dev_priv) < 11 &&
+           bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+               return false;
+
+       return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       int ret;
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *new_crtc_state;
+       struct intel_bw_state *new_bw_state = NULL;
+       const struct intel_bw_state *old_bw_state = NULL;
+       int i;
+
+       for_each_new_intel_crtc_in_state(state, crtc,
+                                        new_crtc_state, i) {
+               new_bw_state = intel_atomic_get_bw_state(state);
+               if (IS_ERR(new_bw_state))
+                       return PTR_ERR(new_bw_state);
+
+               old_bw_state = intel_atomic_get_old_bw_state(state);
+
+               if (intel_crtc_can_enable_sagv(new_crtc_state))
+                       new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+               else
+                       new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+       }
+
+       if (!new_bw_state)
+               return 0;
+
+       new_bw_state->active_pipes =
+               intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+       if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+               ret = intel_atomic_lock_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       }
+
+       for_each_new_intel_crtc_in_state(state, crtc,
+                                        new_crtc_state, i) {
+               struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+               /*
+                * We store use_sagv_wm in the crtc state rather than relying on
+                * that bw state since we have no convenient way to get at the
+                * latter from the plane commit hooks (especially in the legacy
+                * cursor case)
+                */
+               pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
+                                      intel_can_enable_sagv(dev_priv, new_bw_state);
+       }
+
+       if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+           intel_can_enable_sagv(dev_priv, old_bw_state)) {
+               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+               ret = intel_atomic_lock_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 /*
  * Calculate initial DBuf slice offset, based on slice size
  * and mask(i.e if slice size is 1024 and second slice is enabled
@@ -4016,6 +4197,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
                                 int color_plane);
 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
                                 int level,
+                                unsigned int latency,
                                 const struct skl_wm_params *wp,
                                 const struct skl_wm_level *result_prev,
                                 struct skl_wm_level *result /* out */);
@@ -4038,7 +4220,9 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
        drm_WARN_ON(&dev_priv->drm, ret);
 
        for (level = 0; level <= max_level; level++) {
-               skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+               unsigned int latency = dev_priv->wm.skl_latency[level];
+
+               skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
                if (wm.min_ddb_alloc == U16_MAX)
                        break;
 
@@ -4544,6 +4728,20 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
        return total_data_rate;
 }
 
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
+                  enum plane_id plane_id,
+                  int level)
+{
+       const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+       const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+       if (level == 0 && pipe_wm->use_sagv_wm)
+               return &wm->sagv_wm0;
+
+       return &wm->wm[level];
+}
+
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
 {
@@ -4580,7 +4778,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
                                                         plane_data_rate,
                                                         uv_plane_data_rate);
 
-
        skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
                                           alloc, &num_active);
        alloc_size = skl_ddb_entry_size(alloc);
@@ -4780,7 +4977,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
        wm_intermediate_val = latency * pixel_rate * cpp;
        ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
 
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                ret = add_fixed16_u32(ret, 1);
 
        return ret;
@@ -4915,18 +5112,19 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
                                           wp->y_min_scanlines,
                                           wp->dbuf_block_size);
 
-               if (INTEL_GEN(dev_priv) >= 10)
+               if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                        interm_pbpl++;
 
                wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
                                                        wp->y_min_scanlines);
-       } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
-               interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
-                                          wp->dbuf_block_size);
-               wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
        } else {
                interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
-                                          wp->dbuf_block_size) + 1;
+                                          wp->dbuf_block_size);
+
+               if (!wp->x_tiled ||
+                   INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+                       interm_pbpl++;
+
                wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
        }
 
@@ -4972,12 +5170,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
 
 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
                                 int level,
+                                unsigned int latency,
                                 const struct skl_wm_params *wp,
                                 const struct skl_wm_level *result_prev,
                                 struct skl_wm_level *result /* out */)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-       u32 latency = dev_priv->wm.skl_latency[level];
        uint_fixed_16_16_t method1, method2;
        uint_fixed_16_16_t selected_result;
        u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -5106,14 +5304,29 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 
        for (level = 0; level <= max_level; level++) {
                struct skl_wm_level *result = &levels[level];
+               unsigned int latency = dev_priv->wm.skl_latency[level];
 
-               skl_compute_plane_wm(crtc_state, level, wm_params,
-                                    result_prev, result);
+               skl_compute_plane_wm(crtc_state, level, latency,
+                                    wm_params, result_prev, result);
 
                result_prev = result;
        }
 }
 
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+                               const struct skl_wm_params *wm_params,
+                               struct skl_plane_wm *plane_wm)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+       struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
+       struct skl_wm_level *levels = plane_wm->wm;
+       unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
+
+       skl_compute_plane_wm(crtc_state, 0, latency,
+                            wm_params, &levels[0],
+                            sagv_wm);
+}
+
 static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
                                      const struct skl_wm_params *wp,
                                      struct skl_plane_wm *wm)
@@ -5166,10 +5379,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
                                trans_offset_b;
        } else {
                res_blocks = wm0_sel_res_b + trans_offset_b;
-
-               /* WA BUG:1938466 add one block for non y-tile planes */
-               if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
-                       res_blocks += 1;
        }
 
        /*
@@ -5185,6 +5394,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state,
                                     enum plane_id plane_id, int color_plane)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
        struct skl_wm_params wm_params;
        int ret;
@@ -5195,6 +5406,10 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
                return ret;
 
        skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
+
+       if (INTEL_GEN(dev_priv) >= 12)
+               tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
+
        skl_compute_transition_wm(crtc_state, &wm_params, wm);
 
        return 0;
@@ -5354,8 +5569,12 @@ void skl_write_plane_wm(struct intel_plane *plane,
                &crtc_state->wm.skl.plane_ddb_uv[plane_id];
 
        for (level = 0; level <= max_level; level++) {
+               const struct skl_wm_level *wm_level;
+
+               wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
                skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
-                                  &wm->wm[level]);
+                                  wm_level);
        }
        skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
                           &wm->trans_wm);
@@ -5388,8 +5607,12 @@ void skl_write_cursor_wm(struct intel_plane *plane,
                &crtc_state->wm.skl.plane_ddb_y[plane_id];
 
        for (level = 0; level <= max_level; level++) {
+               const struct skl_wm_level *wm_level;
+
+               wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
                skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
-                                  &wm->wm[level]);
+                                  wm_level);
        }
        skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
@@ -5424,8 +5647,8 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
        return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
-static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
-                                          const struct skl_ddb_entry *b)
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+                                   const struct skl_ddb_entry *b)
 {
        return a->start < b->end && b->start < a->end;
 }
@@ -5553,23 +5776,25 @@ skl_print_wm_changes(struct intel_atomic_state *state)
                                continue;
 
                        drm_dbg_kms(&dev_priv->drm,
-                                   "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
-                                   " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+                                   "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
+                                   " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
                                    plane->base.base.id, plane->base.name,
                                    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
                                    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
                                    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
                                    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
                                    enast(old_wm->trans_wm.plane_en),
+                                   enast(old_wm->sagv_wm0.plane_en),
                                    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
                                    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
                                    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
                                    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
-                                   enast(new_wm->trans_wm.plane_en));
+                                   enast(new_wm->trans_wm.plane_en),
+                                   enast(new_wm->sagv_wm0.plane_en));
 
                        drm_dbg_kms(&dev_priv->drm,
-                                   "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
-                                     " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+                                   "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+                                     " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
                                    plane->base.base.id, plane->base.name,
                                    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
                                    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
@@ -5580,6 +5805,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
                                    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
                                    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
                                    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+                                   enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
 
                                    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
                                    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
@@ -5589,37 +5815,42 @@ skl_print_wm_changes(struct intel_atomic_state *state)
                                    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
                                    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
                                    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
-                                   enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+                                   enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
+                                   enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
 
                        drm_dbg_kms(&dev_priv->drm,
-                                   "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
-                                   " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+                                   "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+                                   " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
                                    plane->base.base.id, plane->base.name,
                                    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
                                    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
                                    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
                                    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
                                    old_wm->trans_wm.plane_res_b,
+                                   old_wm->sagv_wm0.plane_res_b,
                                    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
                                    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
                                    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
                                    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
-                                   new_wm->trans_wm.plane_res_b);
+                                   new_wm->trans_wm.plane_res_b,
+                                   new_wm->sagv_wm0.plane_res_b);
 
                        drm_dbg_kms(&dev_priv->drm,
-                                   "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
-                                   " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+                                   "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+                                   " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
                                    plane->base.base.id, plane->base.name,
                                    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
                                    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
                                    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
                                    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
                                    old_wm->trans_wm.min_ddb_alloc,
+                                   old_wm->sagv_wm0.min_ddb_alloc,
                                    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
                                    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
                                    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
                                    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
-                                   new_wm->trans_wm.min_ddb_alloc);
+                                   new_wm->trans_wm.min_ddb_alloc,
+                                   new_wm->sagv_wm0.min_ddb_alloc);
                }
        }
 }
@@ -5780,6 +6011,10 @@ skl_compute_wm(struct intel_atomic_state *state)
        if (ret)
                return ret;
 
+       ret = intel_compute_sagv_mask(state);
+       if (ret)
+               return ret;
+
        /*
         * skl_compute_ddb() will have adjusted the final watermarks
         * based on how much ddb is available. Now we can actually
@@ -5876,8 +6111,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
-static inline void skl_wm_level_from_reg_val(u32 val,
-                                            struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
 {
        level->plane_en = val & PLANE_WM_EN;
        level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
@@ -5909,6 +6143,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                        skl_wm_level_from_reg_val(val, &wm->wm[level]);
                }
 
+               if (INTEL_GEN(dev_priv) >= 12)
+                       wm->sagv_wm0 = wm->wm[0];
+
                if (plane_id != PLANE_CURSOR)
                        val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
                else
@@ -6850,6 +7087,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
        if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
                I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
                           TGL_VRH_GATING_DIS);
+
+       /* Wa_14011059788:tgl */
+       intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+                        0, DFR_DISABLE);
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6882,9 +7123,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
        val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
        /* ReadHitWriteOnlyDisable:cnl */
        val |= RCCUNIT_CLKGATE_DIS;
-       /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
-       if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
-               val |= SARBUNIT_CLKGATE_DIS;
        I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
 
        /* Wa_2201832410:cnl */
index d60a85421c5ad5957077f366167490cf5c5e9034..614ac7f8d4ccc65c379c9b2c859ce6fc604852dc 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 
 #include "i915_reg.h"
+#include "display/intel_bw.h"
 
 struct drm_device;
 struct drm_i915_private;
@@ -41,9 +42,12 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                              struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+                          const struct intel_bw_state *bw_state);
 int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
                         const struct skl_wm_level *l2);
 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
index 3f13baaef0581d43469c278368785de3ad359670..916ccd1c0e96978ed0acb2dc427e158815778552 100644 (file)
@@ -336,7 +336,7 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
        intel_sbi_rw(i915, reg, destination, &value, false);
 }
 
-static inline int gen6_check_mailbox_status(u32 mbox)
+static int gen6_check_mailbox_status(u32 mbox)
 {
        switch (mbox & GEN6_PCODE_ERROR_MASK) {
        case GEN6_PCODE_SUCCESS:
@@ -356,7 +356,7 @@ static inline int gen6_check_mailbox_status(u32 mbox)
        }
 }
 
-static inline int gen7_check_mailbox_status(u32 mbox)
+static int gen7_check_mailbox_status(u32 mbox)
 {
        switch (mbox & GEN6_PCODE_ERROR_MASK) {
        case GEN6_PCODE_SUCCESS:
@@ -371,6 +371,8 @@ static inline int gen7_check_mailbox_status(u32 mbox)
                return -ENXIO;
        case GEN11_PCODE_LOCKED:
                return -EBUSY;
+       case GEN11_PCODE_REJECTED:
+               return -EACCES;
        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
                return -EOVERFLOW;
        default:
@@ -429,7 +431,7 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
 
        mutex_lock(&i915->sb_lock);
        err = __sandybridge_pcode_rw(i915, mbox, val, val1,
-                                    500, 0,
+                                    500, 20,
                                     true);
        mutex_unlock(&i915->sb_lock);
 
index abb18b90d7c392eea4d0d48b50bf48ecc07021b1..a61cb8ca4d50f34cd3878aefd48861741529f73e 100644 (file)
@@ -665,7 +665,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
                mmio_debug_resume(uncore->debug);
 
                if (check_for_unclaimed_mmio(uncore))
-                       dev_info(uncore->i915->drm.dev,
+                       drm_info(&uncore->i915->drm,
                                 "Invalid mmio detected during user access\n");
                spin_unlock(&uncore->debug->lock);
 
@@ -734,6 +734,28 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
        spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
+/**
+ * intel_uncore_forcewake_flush - flush the delayed release
+ * @uncore: the intel_uncore structure
+ * @fw_domains: forcewake domains to flush
+ */
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+                                 enum forcewake_domains fw_domains)
+{
+       struct intel_uncore_forcewake_domain *domain;
+       unsigned int tmp;
+
+       if (!uncore->funcs.force_wake_put)
+               return;
+
+       fw_domains &= uncore->fw_domains;
+       for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+               WRITE_ONCE(domain->active, false);
+               if (hrtimer_cancel(&domain->timer))
+                       intel_uncore_fw_release_timer(&domain->timer);
+       }
+}
+
 /**
  * intel_uncore_forcewake_put__locked - grab forcewake domain references
  * @uncore: the intel_uncore structure
@@ -877,11 +899,6 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
 #define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }
 
-#define HAS_FWTABLE(dev_priv) \
-       (INTEL_GEN(dev_priv) >= 9 || \
-        IS_CHERRYVIEW(dev_priv) || \
-        IS_VALLEYVIEW(dev_priv))
-
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
@@ -1070,8 +1087,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
 
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
-       GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
-       GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+       GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
@@ -1081,27 +1097,31 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
-       GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x8800, 0x8bff, 0),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
-       GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
-       GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
-       GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+       GEN_FW_RANGE(0x9560, 0x95ff, 0),
+       GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
-       GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
-       GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
-       GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x24000, 0x2407f, 0),
+       GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
+       GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
+       GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x40000, 0x1bffff, 0),
        GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
-       GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
-       GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
-       GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+       GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
+       GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
        GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
-       GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
-       GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+       GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
 };
 
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
index dcfa243892c67e8c842f5f08c987b08a1dc5fcc1..8d3aa8b9acf9fe9999cb8e7f425d31be31461e50 100644 (file)
@@ -209,7 +209,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                enum forcewake_domains domains);
 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                enum forcewake_domains domains);
-/* Like above but the caller must manage the uncore.lock itself.
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+                                 enum forcewake_domains fw_domains);
+
+/*
+ * Like above but the caller must manage the uncore.lock itself.
  * Must be used with I915_READ_FW and friends.
  */
 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
index 8fbf6f4d3f26b7fe24a941dc13669d093a5f45d9..dfd87d082218078cb98ae368e812dec337f43cb3 100644 (file)
@@ -70,11 +70,12 @@ unlock:
 
 void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 {
-       INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+       INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
 
        /* Assume we are not in process context and so cannot sleep. */
        if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
-               schedule_work(&wf->work);
+               mod_delayed_work(system_wq, &wf->work,
+                                FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
                return;
        }
 
@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 
 static void __intel_wakeref_put_work(struct work_struct *wrk)
 {
-       struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+       struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
 
        if (atomic_add_unless(&wf->count, -1, 1))
                return;
@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
        atomic_set(&wf->count, 0);
        wf->wakeref = 0;
 
-       INIT_WORK(&wf->work, __intel_wakeref_put_work);
-       lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+       INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
+       lockdep_init_map(&wf->work.work.lockdep_map,
+                        "wakeref.work", &key->work, 0);
 }
 
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
index 7d1e676b71ef7d4be0dffccebc13e7e074f44360..545c8f277c46013f6b6326d03e23fda372d99d70 100644 (file)
@@ -8,6 +8,7 @@
 #define INTEL_WAKEREF_H
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
@@ -41,7 +42,7 @@ struct intel_wakeref {
        struct intel_runtime_pm *rpm;
        const struct intel_wakeref_ops *ops;
 
-       struct work_struct work;
+       struct delayed_work work;
 };
 
 struct intel_wakeref_lockclass {
@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
        return atomic_inc_not_zero(&wf->count);
 }
 
+enum {
+       INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
+       __INTEL_WAKEREF_PUT_LAST_BIT__
+};
+
 /**
  * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
  */
 static inline void
 __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
+#define INTEL_WAKEREF_PUT_DELAY \
+       GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
 {
        INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
        if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
        __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
 }
 
+static inline void
+intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
+{
+       __intel_wakeref_put(wf,
+                           INTEL_WAKEREF_PUT_ASYNC |
+                           FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
+}
+
 /**
  * intel_wakeref_lock: Lock the wakeref (mutex)
  * @wf: the wakeref
@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
 {
        mutex_lock(&wf->mutex);
        mutex_unlock(&wf->mutex);
-       flush_work(&wf->work);
+       flush_delayed_work(&wf->work);
 }
 
 /**
index 2bb9f9f9a50af48f1ba9c9c145ce56d4e1c53ae2..ec776591e1cfdb29c8abf4fdf033eb7b993bba02 100644 (file)
@@ -86,10 +86,10 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
        else
                wopcm->size = GEN9_WOPCM_SIZE;
 
-       DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
+       drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024);
 }
 
-static inline u32 context_reserved_size(struct drm_i915_private *i915)
+static u32 context_reserved_size(struct drm_i915_private *i915)
 {
        if (IS_GEN9_LP(i915))
                return BXT_WOPCM_RC6_CTX_RESERVED;
@@ -99,8 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
                return 0;
 }
 
-static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
-                                       u32 guc_wopcm_base, u32 guc_wopcm_size)
+static bool gen9_check_dword_gap(struct drm_i915_private *i915,
+                                u32 guc_wopcm_base, u32 guc_wopcm_size)
 {
        u32 offset;
 
@@ -112,7 +112,7 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
        offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
        if (offset > guc_wopcm_size ||
            (guc_wopcm_size - offset) < sizeof(u32)) {
-               dev_err(i915->drm.dev,
+               drm_err(&i915->drm,
                        "WOPCM: invalid GuC region size: %uK < %uK\n",
                        guc_wopcm_size / SZ_1K,
                        (u32)(offset + sizeof(u32)) / SZ_1K);
@@ -122,8 +122,8 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
        return true;
 }
 
-static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
-                                         u32 guc_wopcm_size, u32 huc_fw_size)
+static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+                                  u32 guc_wopcm_size, u32 huc_fw_size)
 {
        /*
         * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -131,7 +131,7 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
         * firmware uploading would fail.
         */
        if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
-               dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+               drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
                        intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
                        (guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
                        huc_fw_size / 1024);
@@ -141,32 +141,31 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
        return true;
 }
 
-static inline bool check_hw_restrictions(struct drm_i915_private *i915,
-                                        u32 guc_wopcm_base, u32 guc_wopcm_size,
-                                        u32 huc_fw_size)
+static bool check_hw_restrictions(struct drm_i915_private *i915,
+                                 u32 guc_wopcm_base, u32 guc_wopcm_size,
+                                 u32 huc_fw_size)
 {
        if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
                                                     guc_wopcm_size))
                return false;
 
-       if ((IS_GEN(i915, 9) ||
-            IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+       if (IS_GEN(i915, 9) &&
            !gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
                return false;
 
        return true;
 }
 
-static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
-                                 u32 guc_wopcm_base, u32 guc_wopcm_size,
-                                 u32 guc_fw_size, u32 huc_fw_size)
+static bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+                          u32 guc_wopcm_base, u32 guc_wopcm_size,
+                          u32 guc_fw_size, u32 huc_fw_size)
 {
        const u32 ctx_rsvd = context_reserved_size(i915);
        u32 size;
 
        size = wopcm_size - ctx_rsvd;
        if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
-               dev_err(i915->drm.dev,
+               drm_err(&i915->drm,
                        "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
                        guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
                        size / SZ_1K);
@@ -175,7 +174,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
 
        size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
        if (unlikely(guc_wopcm_size < size)) {
-               dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+               drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
                        intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
                        guc_wopcm_size / SZ_1K, size / SZ_1K);
                return false;
@@ -183,7 +182,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
 
        size = huc_fw_size + WOPCM_RESERVED_SIZE;
        if (unlikely(guc_wopcm_base < size)) {
-               dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+               drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
                        intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
                        guc_wopcm_base / SZ_1K, size / SZ_1K);
                return false;
@@ -242,10 +241,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
                return;
 
        if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
-               DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
-                                    "GuC WOPCM is already locked [%uK, %uK)\n",
-                                    guc_wopcm_base / SZ_1K,
-                                    guc_wopcm_size / SZ_1K);
+               drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
+                       guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
                goto check;
        }
 
@@ -266,8 +263,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
        guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
        guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
 
-       DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
-                            guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+       drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
+               guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
 
 check:
        if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
deleted file mode 100644 (file)
index 14da5c3..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bdw.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x000000a0 },
-       { _MMIO(0x9888), 0x198b0000 },
-       { _MMIO(0x9888), 0x078b0066 },
-       { _MMIO(0x9888), 0x118b0000 },
-       { _MMIO(0x9888), 0x258b0000 },
-       { _MMIO(0x9888), 0x21850008 },
-       { _MMIO(0x9888), 0x0d834000 },
-       { _MMIO(0x9888), 0x07844000 },
-       { _MMIO(0x9888), 0x17804000 },
-       { _MMIO(0x9888), 0x21800000 },
-       { _MMIO(0x9888), 0x4f800000 },
-       { _MMIO(0x9888), 0x41800000 },
-       { _MMIO(0x9888), 0x31800000 },
-       { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "d6de6f55-e526-4f79-a6a6-d7315c09044e",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
deleted file mode 100644 (file)
index 0cee333..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
deleted file mode 100644 (file)
index 3e785ba..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bxt.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x19800000 },
-       { _MMIO(0x9888), 0x07800063 },
-       { _MMIO(0x9888), 0x11800000 },
-       { _MMIO(0x9888), 0x23810008 },
-       { _MMIO(0x9888), 0x1d950400 },
-       { _MMIO(0x9888), 0x0f922000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x55900000 },
-       { _MMIO(0x9888), 0x47900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "5ee72f5c-092f-421e-8b70-225f7c3e9612",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
deleted file mode 100644 (file)
index 0bdf391..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
deleted file mode 100644 (file)
index 0ea86f7..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
deleted file mode 100644 (file)
index 6b86228..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
deleted file mode 100644 (file)
index fc632dd..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "577e8e2c-3fa0-4875-8743-3538d585e3b0",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
deleted file mode 100644 (file)
index 4ca9d8f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
deleted file mode 100644 (file)
index 6cd4e99..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_chv.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x000000a0 },
-       { _MMIO(0x9888), 0x59800000 },
-       { _MMIO(0x9888), 0x59800001 },
-       { _MMIO(0x9888), 0x338b0000 },
-       { _MMIO(0x9888), 0x258b0066 },
-       { _MMIO(0x9888), 0x058b0000 },
-       { _MMIO(0x9888), 0x038b0000 },
-       { _MMIO(0x9888), 0x03844000 },
-       { _MMIO(0x9888), 0x47800080 },
-       { _MMIO(0x9888), 0x57800000 },
-       { _MMIO(0x1823a4), 0x00000000 },
-       { _MMIO(0x9888), 0x59800000 },
-       { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "4a534b07-cba3-414d-8d60-874830e883aa",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
deleted file mode 100644 (file)
index 3cac7bb..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
deleted file mode 100644 (file)
index 1041e89..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cnl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x0000ffff },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x0000ffff },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x0000ffff },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0xd04), 0x00000200 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x17060000 },
-       { _MMIO(0x9840), 0x00000000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x13034000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x07060066 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x05060000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x0f080040 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x07091000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x0f041000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x1d004000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x35000000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x49000000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x3d000000 },
-       { _MMIO(0x9884), 0x00000007 },
-       { _MMIO(0x9888), 0x31000000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "db41edd4-d8e7-4730-ad11-b9a2d6833503",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
deleted file mode 100644 (file)
index db379f5..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
deleted file mode 100644 (file)
index bd15ebe..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_glk.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x19800000 },
-       { _MMIO(0x9888), 0x07800063 },
-       { _MMIO(0x9888), 0x11800000 },
-       { _MMIO(0x9888), 0x23810008 },
-       { _MMIO(0x9888), 0x1d950400 },
-       { _MMIO(0x9888), 0x0f922000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x55900000 },
-       { _MMIO(0x9888), 0x47900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
deleted file mode 100644 (file)
index 779f343..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
deleted file mode 100644 (file)
index 133721a..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
-       { _MMIO(0x2724), 0x00800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2714), 0x00800000 },
-       { _MMIO(0x2710), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x253a4), 0x01600000 },
-       { _MMIO(0x25440), 0x00100000 },
-       { _MMIO(0x25128), 0x00000000 },
-       { _MMIO(0x2691c), 0x00000800 },
-       { _MMIO(0x26aa0), 0x01500000 },
-       { _MMIO(0x26b9c), 0x00006000 },
-       { _MMIO(0x2791c), 0x00000800 },
-       { _MMIO(0x27aa0), 0x01500000 },
-       { _MMIO(0x27b9c), 0x00006000 },
-       { _MMIO(0x2641c), 0x00000400 },
-       { _MMIO(0x25380), 0x00000010 },
-       { _MMIO(0x2538c), 0x00000000 },
-       { _MMIO(0x25384), 0x0800aaaa },
-       { _MMIO(0x25400), 0x00000004 },
-       { _MMIO(0x2540c), 0x06029000 },
-       { _MMIO(0x25410), 0x00000002 },
-       { _MMIO(0x25404), 0x5c30ffff },
-       { _MMIO(0x25100), 0x00000016 },
-       { _MMIO(0x25110), 0x00000400 },
-       { _MMIO(0x25104), 0x00000000 },
-       { _MMIO(0x26804), 0x00001211 },
-       { _MMIO(0x26884), 0x00000100 },
-       { _MMIO(0x26900), 0x00000002 },
-       { _MMIO(0x26908), 0x00700000 },
-       { _MMIO(0x26904), 0x00000000 },
-       { _MMIO(0x26984), 0x00001022 },
-       { _MMIO(0x26a04), 0x00000011 },
-       { _MMIO(0x26a80), 0x00000006 },
-       { _MMIO(0x26a88), 0x00000c02 },
-       { _MMIO(0x26a84), 0x00000000 },
-       { _MMIO(0x26b04), 0x00001000 },
-       { _MMIO(0x26b80), 0x00000002 },
-       { _MMIO(0x26b8c), 0x00000007 },
-       { _MMIO(0x26b84), 0x00000000 },
-       { _MMIO(0x27804), 0x00004844 },
-       { _MMIO(0x27884), 0x00000400 },
-       { _MMIO(0x27900), 0x00000002 },
-       { _MMIO(0x27908), 0x0e000000 },
-       { _MMIO(0x27904), 0x00000000 },
-       { _MMIO(0x27984), 0x00004088 },
-       { _MMIO(0x27a04), 0x00000044 },
-       { _MMIO(0x27a80), 0x00000006 },
-       { _MMIO(0x27a88), 0x00018040 },
-       { _MMIO(0x27a84), 0x00000000 },
-       { _MMIO(0x27b04), 0x00004000 },
-       { _MMIO(0x27b80), 0x00000002 },
-       { _MMIO(0x27b8c), 0x000000e0 },
-       { _MMIO(0x27b84), 0x00000000 },
-       { _MMIO(0x26104), 0x00002222 },
-       { _MMIO(0x26184), 0x0c006666 },
-       { _MMIO(0x26284), 0x04000000 },
-       { _MMIO(0x26304), 0x04000000 },
-       { _MMIO(0x26400), 0x00000002 },
-       { _MMIO(0x26410), 0x000000a0 },
-       { _MMIO(0x26404), 0x00000000 },
-       { _MMIO(0x25420), 0x04108020 },
-       { _MMIO(0x25424), 0x1284a420 },
-       { _MMIO(0x2541c), 0x00000000 },
-       { _MMIO(0x25428), 0x00042049 },
-};
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "403d8832-1a27-4aa6-a64e-f5389ce7b212",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
deleted file mode 100644 (file)
index ba97f73..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
deleted file mode 100644 (file)
index 2d92041..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_icl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x0000ffff },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x0000ffff },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x0000ffff },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0xd04), 0x00000200 },
-       { _MMIO(0x9840), 0x00000000 },
-       { _MMIO(0x9884), 0x00000000 },
-       { _MMIO(0x9888), 0x10060000 },
-       { _MMIO(0x9888), 0x22060000 },
-       { _MMIO(0x9888), 0x16060000 },
-       { _MMIO(0x9888), 0x24060000 },
-       { _MMIO(0x9888), 0x18060000 },
-       { _MMIO(0x9888), 0x1a060000 },
-       { _MMIO(0x9888), 0x12060000 },
-       { _MMIO(0x9888), 0x14060000 },
-       { _MMIO(0x9888), 0x10060000 },
-       { _MMIO(0x9888), 0x22060000 },
-       { _MMIO(0x9884), 0x00000003 },
-       { _MMIO(0x9888), 0x16130000 },
-       { _MMIO(0x9888), 0x24000001 },
-       { _MMIO(0x9888), 0x0e130056 },
-       { _MMIO(0x9888), 0x10130000 },
-       { _MMIO(0x9888), 0x1a130000 },
-       { _MMIO(0x9888), 0x541f0001 },
-       { _MMIO(0x9888), 0x181f0000 },
-       { _MMIO(0x9888), 0x4c1f0000 },
-       { _MMIO(0x9888), 0x301f0000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "a291665e-244b-4b76-9b9a-01de9d3c8068",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
deleted file mode 100644 (file)
index 5c64112..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
deleted file mode 100644 (file)
index 1c3a67c..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "baa3c7e4-52b6-4b85-801e-465a94b746dd",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
deleted file mode 100644 (file)
index 810532f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
deleted file mode 100644 (file)
index ebbe5a9..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "f1792f32-6db2-4b50-b4b2-557128f1688d",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
deleted file mode 100644 (file)
index 13d7045..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
deleted file mode 100644 (file)
index 1bc359e..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810016 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
deleted file mode 100644 (file)
index fda70c5..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
deleted file mode 100644 (file)
index 6e352f8..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "2b985803-d3c9-4629-8a4f-634bfecba0e8",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
deleted file mode 100644 (file)
index df74eba..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
deleted file mode 100644 (file)
index 8f34511..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt4.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0x2740), 0x00000000 },
-       { _MMIO(0x2744), 0x00800000 },
-       { _MMIO(0x2714), 0xf0800000 },
-       { _MMIO(0x2710), 0x00000000 },
-       { _MMIO(0x2724), 0xf0800000 },
-       { _MMIO(0x2720), 0x00000000 },
-       { _MMIO(0x2770), 0x00000004 },
-       { _MMIO(0x2774), 0x00000000 },
-       { _MMIO(0x2778), 0x00000003 },
-       { _MMIO(0x277c), 0x00000000 },
-       { _MMIO(0x2780), 0x00000007 },
-       { _MMIO(0x2784), 0x00000000 },
-       { _MMIO(0x2788), 0x00100002 },
-       { _MMIO(0x278c), 0x0000fff7 },
-       { _MMIO(0x2790), 0x00100002 },
-       { _MMIO(0x2794), 0x0000ffcf },
-       { _MMIO(0x2798), 0x00100082 },
-       { _MMIO(0x279c), 0x0000ffef },
-       { _MMIO(0x27a0), 0x001000c2 },
-       { _MMIO(0x27a4), 0x0000ffe7 },
-       { _MMIO(0x27a8), 0x00100001 },
-       { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x9840), 0x00000080 },
-       { _MMIO(0x9888), 0x11810000 },
-       { _MMIO(0x9888), 0x07810013 },
-       { _MMIO(0x9888), 0x1f810000 },
-       { _MMIO(0x9888), 0x1d810000 },
-       { _MMIO(0x9888), 0x1b930040 },
-       { _MMIO(0x9888), 0x07e54000 },
-       { _MMIO(0x9888), 0x1f908000 },
-       { _MMIO(0x9888), 0x11900000 },
-       { _MMIO(0x9888), 0x37900000 },
-       { _MMIO(0x9888), 0x53900000 },
-       { _MMIO(0x9888), 0x45900000 },
-       { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "882fa433-1f4a-4a67-a962-c741888fe5f5",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
deleted file mode 100644 (file)
index 378ab7a..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
deleted file mode 100644 (file)
index a29d937..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_tgl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
-       { _MMIO(0xD920), 0x00000000 },
-       { _MMIO(0xD900), 0x00000000 },
-       { _MMIO(0xD904), 0xF0800000 },
-       { _MMIO(0xD910), 0x00000000 },
-       { _MMIO(0xD914), 0xF0800000 },
-       { _MMIO(0xDC40), 0x00FF0000 },
-       { _MMIO(0xD940), 0x00000004 },
-       { _MMIO(0xD944), 0x0000FFFF },
-       { _MMIO(0xDC00), 0x00000004 },
-       { _MMIO(0xDC04), 0x0000FFFF },
-       { _MMIO(0xD948), 0x00000003 },
-       { _MMIO(0xD94C), 0x0000FFFF },
-       { _MMIO(0xDC08), 0x00000003 },
-       { _MMIO(0xDC0C), 0x0000FFFF },
-       { _MMIO(0xD950), 0x00000007 },
-       { _MMIO(0xD954), 0x0000FFFF },
-       { _MMIO(0xDC10), 0x00000007 },
-       { _MMIO(0xDC14), 0x0000FFFF },
-       { _MMIO(0xD958), 0x00100002 },
-       { _MMIO(0xD95C), 0x0000FFF7 },
-       { _MMIO(0xDC18), 0x00100002 },
-       { _MMIO(0xDC1C), 0x0000FFF7 },
-       { _MMIO(0xD960), 0x00100002 },
-       { _MMIO(0xD964), 0x0000FFCF },
-       { _MMIO(0xDC20), 0x00100002 },
-       { _MMIO(0xDC24), 0x0000FFCF },
-       { _MMIO(0xD968), 0x00100082 },
-       { _MMIO(0xD96C), 0x0000FFEF },
-       { _MMIO(0xDC28), 0x00100082 },
-       { _MMIO(0xDC2C), 0x0000FFEF },
-       { _MMIO(0xD970), 0x001000C2 },
-       { _MMIO(0xD974), 0x0000FFE7 },
-       { _MMIO(0xDC30), 0x001000C2 },
-       { _MMIO(0xDC34), 0x0000FFE7 },
-       { _MMIO(0xD978), 0x00100001 },
-       { _MMIO(0xD97C), 0x0000FFE7 },
-       { _MMIO(0xDC38), 0x00100001 },
-       { _MMIO(0xDC3C), 0x0000FFE7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
-       { _MMIO(0x0D04), 0x00000200 },
-       { _MMIO(0x9840), 0x00000000 },
-       { _MMIO(0x9884), 0x00000000 },
-       { _MMIO(0x9888), 0x280E0000 },
-       { _MMIO(0x9888), 0x1E0E0147 },
-       { _MMIO(0x9888), 0x180E0000 },
-       { _MMIO(0x9888), 0x160E0000 },
-       { _MMIO(0x9888), 0x1E0F1000 },
-       { _MMIO(0x9888), 0x1E104000 },
-       { _MMIO(0x9888), 0x2E020100 },
-       { _MMIO(0x9888), 0x2C030004 },
-       { _MMIO(0x9888), 0x38003000 },
-       { _MMIO(0x9888), 0x1E0A8000 },
-       { _MMIO(0x9884), 0x00000003 },
-       { _MMIO(0x9888), 0x49110000 },
-       { _MMIO(0x9888), 0x5D101400 },
-       { _MMIO(0x9888), 0x1D140020 },
-       { _MMIO(0x9888), 0x1D1103A3 },
-       { _MMIO(0x9888), 0x01110000 },
-       { _MMIO(0x9888), 0x61111000 },
-       { _MMIO(0x9888), 0x1F128000 },
-       { _MMIO(0x9888), 0x17100000 },
-       { _MMIO(0x9888), 0x55100630 },
-       { _MMIO(0x9888), 0x57100000 },
-       { _MMIO(0x9888), 0x31100000 },
-       { _MMIO(0x9884), 0x00000003 },
-       { _MMIO(0x9888), 0x65100002 },
-       { _MMIO(0x9884), 0x00000000 },
-       { _MMIO(0x9888), 0x42000001 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
-{
-       strlcpy(dev_priv->perf.test_config.uuid,
-               "80a833f0-2504-4321-8894-e9277844ce7b",
-               sizeof(dev_priv->perf.test_config.uuid));
-       dev_priv->perf.test_config.id = 1;
-
-       dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
-       dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
-       dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
-       dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
-       dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
-       dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
-       dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
-       dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
-       dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
-       dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
-       dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
-       dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
deleted file mode 100644 (file)
index 4c25f0b..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_TGL_H__
-#define __I915_OA_TGL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
-
-#endif
index 68bbb158016263d79024c9ef96eee16bf7151314..4002c984c2e0c261f54d46e362b8b3f7caf40cad 100644 (file)
@@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
        if (IS_ERR(active))
                return PTR_ERR(active);
 
-       i915_active_wait(&active->base);
+       __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);
 
@@ -228,11 +228,11 @@ static int live_active_barrier(void *arg)
        }
 
        i915_active_release(&active->base);
+       if (err)
+               goto out;
 
-       if (err == 0)
-               err = i915_active_wait(&active->base);
-
-       if (err == 0 && !READ_ONCE(active->retired)) {
+       __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+       if (!READ_ONCE(active->retired)) {
                pr_err("i915_active not retired after flushing barriers!\n");
                err = -EINVAL;
        }
@@ -277,7 +277,7 @@ static struct intel_engine_cs *node_to_barrier(struct active_node *it)
 
 void i915_active_print(struct i915_active *ref, struct drm_printer *m)
 {
-       drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+       drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
        drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
        drm_printf(m, "\tpreallocated barriers? %s\n",
                   yesno(!llist_empty(&ref->preallocated_barriers)));
index 623759b73bb4f9410e5fe939fd2a2b1af847f606..88d400b9df88c35c4544c64d1ab6c1e0b02eb649 100644 (file)
@@ -125,8 +125,6 @@ static void pm_resume(struct drm_i915_private *i915)
         */
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                i915_ggtt_resume(&i915->ggtt);
-               i915_gem_restore_fences(&i915->ggtt);
-
                i915_gem_resume(i915);
        }
 }
index 06ef88510209faacaa18454e9e1bdd9aac70a0cd..028baae9631f273d40983bad29cb24848351ded5 100644 (file)
@@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
 
 static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
 {
-       unsigned long unbound, bound, count;
        struct drm_i915_gem_object *obj;
+       unsigned long count;
 
        count = 0;
        do {
@@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
        pr_debug("Filled GGTT with %lu pages [%llu total]\n",
                 count, ggtt->vm.total / PAGE_SIZE);
 
-       bound = 0;
-       unbound = 0;
-       list_for_each_entry(obj, objects, st_link) {
-               GEM_BUG_ON(!obj->mm.quirked);
-
-               if (atomic_read(&obj->bind_count))
-                       bound++;
-               else
-                       unbound++;
-       }
-       GEM_BUG_ON(bound + unbound != count);
-
-       if (unbound) {
-               pr_err("%s: Found %lu objects unbound, expected %u!\n",
-                      __func__, unbound, 0);
-               return -EINVAL;
-       }
-
-       if (bound != count) {
-               pr_err("%s: Found %lu objects bound, expected %lu!\n",
-                      __func__, bound, count);
-               return -EINVAL;
-       }
-
        if (list_empty(&ggtt->vm.bound_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
index b342bef5e7c95613397d618ae87145375fcb7ec2..2e471500a64632f20d1cf5f5f006ef8139918519 100644 (file)
@@ -331,9 +331,6 @@ static void close_object_list(struct list_head *objects,
                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
-               /* Only ppgtt vma may be closed before the object is freed */
-               if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
-                       i915_vma_close(vma);
 
                list_del(&obj->st_link);
                i915_gem_object_put(obj);
@@ -591,7 +588,7 @@ static int walk_hole(struct i915_address_space *vm,
                                pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
-                               goto err_close;
+                               goto err_put;
                        }
                        i915_vma_unpin(vma);
 
@@ -600,14 +597,14 @@ static int walk_hole(struct i915_address_space *vm,
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
-                               goto err_close;
+                               goto err_put;
                        }
 
                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
                                       __func__, addr, vma->size, err);
-                               goto err_close;
+                               goto err_put;
                        }
 
                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -616,13 +613,10 @@ static int walk_hole(struct i915_address_space *vm,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
-                               goto err_close;
+                               goto err_put;
                        }
                }
 
-err_close:
-               if (!i915_vma_is_ggtt(vma))
-                       i915_vma_close(vma);
 err_put:
                i915_gem_object_put(obj);
                if (err)
@@ -675,7 +669,7 @@ static int pot_hole(struct i915_address_space *vm,
                                       addr,
                                       hole_start, hole_end,
                                       err);
-                               goto err;
+                               goto err_obj;
                        }
 
                        if (!drm_mm_node_allocated(&vma->node) ||
@@ -685,7 +679,7 @@ static int pot_hole(struct i915_address_space *vm,
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
-                               goto err;
+                               goto err_obj;
                        }
 
                        i915_vma_unpin(vma);
@@ -697,13 +691,10 @@ static int pot_hole(struct i915_address_space *vm,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
-                       goto err;
+                       goto err_obj;
                }
        }
 
-err:
-       if (!i915_vma_is_ggtt(vma))
-               i915_vma_close(vma);
 err_obj:
        i915_gem_object_put(obj);
        return err;
@@ -778,7 +769,7 @@ static int drunk_hole(struct i915_address_space *vm,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
-                               goto err;
+                               goto err_obj;
                        }
 
                        if (!drm_mm_node_allocated(&vma->node) ||
@@ -788,7 +779,7 @@ static int drunk_hole(struct i915_address_space *vm,
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
-                               goto err;
+                               goto err_obj;
                        }
 
                        i915_vma_unpin(vma);
@@ -799,13 +790,10 @@ static int drunk_hole(struct i915_address_space *vm,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
-                               goto err;
+                               goto err_obj;
                        }
                }
 
-err:
-               if (!i915_vma_is_ggtt(vma))
-                       i915_vma_close(vma);
 err_obj:
                i915_gem_object_put(obj);
                kfree(order);
@@ -1229,7 +1217,6 @@ static void track_vma_bind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
 
-       atomic_inc(&obj->bind_count); /* track for eviction later */
        __i915_gem_object_pin_pages(obj);
 
        GEM_BUG_ON(vma->pages);
index 0a953bfc0585617a881fcde39b9e02982a23d350..5dd5d81646c4ac405d31694a8ec307f3762008b0 100644 (file)
@@ -37,6 +37,7 @@ selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(gem_contexts, i915_gem_context_live_selftests)
+selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
 selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
index 5b39bab4da1d0cb3246392dd64a19c9837889bfb..6a2be7d0dd956dec05f48394baf7347363da2033 100644 (file)
@@ -16,6 +16,7 @@
  * Tests are executed in order by igt/drv_selftest
  */
 selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
 selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
index d1a1568c47baaa36183038104c2a2c12165c4705..8eb3108f1767a7f42e5ed101868ab096055104f1 100644 (file)
 #include "igt_flush_test.h"
 #include "lib_sw_fence.h"
 
+#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"
+
+static int
+alloc_empty_config(struct i915_perf *perf)
+{
+       struct i915_oa_config *oa_config;
+
+       oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+       if (!oa_config)
+               return -ENOMEM;
+
+       oa_config->perf = perf;
+       kref_init(&oa_config->ref);
+
+       strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));
+
+       mutex_lock(&perf->metrics_lock);
+
+       oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
+       if (oa_config->id < 0)  {
+               mutex_unlock(&perf->metrics_lock);
+               i915_oa_config_put(oa_config);
+               return -ENOMEM;
+       }
+
+       mutex_unlock(&perf->metrics_lock);
+
+       return 0;
+}
+
+static void
+destroy_empty_config(struct i915_perf *perf)
+{
+       struct i915_oa_config *oa_config = NULL, *tmp;
+       int id;
+
+       mutex_lock(&perf->metrics_lock);
+
+       idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+               if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+                       oa_config = tmp;
+                       break;
+               }
+       }
+
+       if (oa_config)
+               idr_remove(&perf->metrics_idr, oa_config->id);
+
+       mutex_unlock(&perf->metrics_lock);
+
+       if (oa_config)
+               i915_oa_config_put(oa_config);
+}
+
+static struct i915_oa_config *
+get_empty_config(struct i915_perf *perf)
+{
+       struct i915_oa_config *oa_config = NULL, *tmp;
+       int id;
+
+       mutex_lock(&perf->metrics_lock);
+
+       idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+               if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+                       oa_config = i915_oa_config_get(tmp);
+                       break;
+               }
+       }
+
+       mutex_unlock(&perf->metrics_lock);
+
+       return oa_config;
+}
+
 static struct i915_perf_stream *
 test_stream(struct i915_perf *perf)
 {
        struct drm_i915_perf_open_param param = {};
+       struct i915_oa_config *oa_config = get_empty_config(perf);
        struct perf_open_properties props = {
                .engine = intel_engine_lookup_user(perf->i915,
                                                   I915_ENGINE_CLASS_RENDER,
@@ -25,13 +100,19 @@ test_stream(struct i915_perf *perf)
                .sample_flags = SAMPLE_OA_REPORT,
                .oa_format = IS_GEN(perf->i915, 12) ?
                I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
-               .metrics_set = 1,
        };
        struct i915_perf_stream *stream;
 
+       if (!oa_config)
+               return NULL;
+
+       props.metrics_set = oa_config->id;
+
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
-       if (!stream)
+       if (!stream) {
+               i915_oa_config_put(oa_config);
                return NULL;
+       }
 
        stream->perf = perf;
 
@@ -42,6 +123,8 @@ test_stream(struct i915_perf *perf)
        }
        mutex_unlock(&perf->lock);
 
+       i915_oa_config_put(oa_config);
+
        return stream;
 }
 
@@ -138,8 +221,7 @@ static int live_noa_delay(void *arg)
                goto out;
        }
 
-       if (rq->engine->emit_init_breadcrumb &&
-           i915_request_timeline(rq)->has_initial_breadcrumb) {
+       if (rq->engine->emit_init_breadcrumb) {
                err = rq->engine->emit_init_breadcrumb(rq);
                if (err) {
                        i915_request_add(rq);
@@ -180,8 +262,7 @@ static int live_noa_delay(void *arg)
 
        delay = intel_read_status_page(stream->engine, 0x102);
        delay -= intel_read_status_page(stream->engine, 0x100);
-       delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
-                       RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+       delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
        pr_info("GPU delay: %uns, expected %lluns\n",
                delay, expected);
 
@@ -206,6 +287,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_noa_delay),
        };
        struct i915_perf *perf = &i915->perf;
+       int err;
 
        if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
                return 0;
@@ -213,5 +295,13 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
        if (intel_gt_is_wedged(&i915->gt))
                return 0;
 
-       return i915_subtests(tests, i915);
+       err = alloc_empty_config(&i915->perf);
+       if (err)
+               return err;
+
+       err = i915_subtests(tests, i915);
+
+       destroy_empty_config(&i915->perf);
+
+       return err;
 }
index 3bf7f53e99243aac5d23b0f8dfd77e0b39e287ff..d8da142985eb2e16a813e085dcf91d9c2e0965a5 100644 (file)
@@ -16,5 +16,6 @@
  * Tests are executed in order by igt/i915_selftest
  */
 selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(request, i915_request_perf_selftests)
 selftest(blt, i915_gem_object_blt_perf_selftests)
 selftest(region, intel_memory_region_perf_selftests)
index f89d9c42f1fad51130d913d0aa561a6c693ab182..6014e8dfcbb18581f4a4278a051dc71db6e88112 100644 (file)
  */
 
 #include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
 
 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"
 
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 
 #include "i915_random.h"
@@ -51,6 +53,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
        return count;
 }
 
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+       return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
 static int igt_add_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -58,7 +65,7 @@ static int igt_add_request(void *arg)
 
        /* Basic preliminary test to create a request and let it loose! */
 
-       request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+       request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
        if (!request)
                return -ENOMEM;
 
@@ -76,7 +83,7 @@ static int igt_wait_request(void *arg)
 
        /* Submit a request, then wait upon it */
 
-       request = mock_request(i915->engine[RCS0]->kernel_context, T);
+       request = mock_request(rcs0(i915)->kernel_context, T);
        if (!request)
                return -ENOMEM;
 
@@ -145,7 +152,7 @@ static int igt_fence_wait(void *arg)
 
        /* Submit a request, treat it as a fence and wait upon it */
 
-       request = mock_request(i915->engine[RCS0]->kernel_context, T);
+       request = mock_request(rcs0(i915)->kernel_context, T);
        if (!request)
                return -ENOMEM;
 
@@ -420,7 +427,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct smoketest t = {
-               .engine = i915->engine[RCS0],
+               .engine = rcs0(i915),
                .ncontexts = 1024,
                .max_batch = 1024,
                .request_alloc = __mock_request_alloc
@@ -809,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
                return PTR_ERR(cmd);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(batch->vm->gt);
 
+       __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
        i915_gem_object_unpin_map(batch->obj);
 
+       intel_gt_chipset_flush(batch->vm->gt);
+
        return 0;
 }
 
@@ -858,13 +867,6 @@ static int live_all_engines(void *arg)
                        goto out_request;
                }
 
-               err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
-                                           0);
-               GEM_BUG_ON(err);
-               request[idx]->batch = batch;
-
                i915_vma_lock(batch);
                err = i915_request_await_object(request[idx], batch->obj, 0);
                if (err == 0)
@@ -872,6 +874,13 @@ static int live_all_engines(void *arg)
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);
 
+               err = engine->emit_bb_start(request[idx],
+                                           batch->node.start,
+                                           batch->node.size,
+                                           0);
+               GEM_BUG_ON(err);
+               request[idx]->batch = batch;
+
                i915_request_get(request[idx]);
                i915_request_add(request[idx]);
                idx++;
@@ -986,13 +995,6 @@ static int live_sequential_engines(void *arg)
                        }
                }
 
-               err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
-                                           0);
-               GEM_BUG_ON(err);
-               request[idx]->batch = batch;
-
                i915_vma_lock(batch);
                err = i915_request_await_object(request[idx],
                                                batch->obj, false);
@@ -1001,6 +1003,13 @@ static int live_sequential_engines(void *arg)
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);
 
+               err = engine->emit_bb_start(request[idx],
+                                           batch->node.start,
+                                           batch->node.size,
+                                           0);
+               GEM_BUG_ON(err);
+               request[idx]->batch = batch;
+
                i915_request_get(request[idx]);
                i915_request_add(request[idx]);
 
@@ -1053,9 +1062,12 @@ out_request:
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
-                       intel_gt_chipset_flush(engine->gt);
 
+                       __i915_gem_object_flush_map(request[idx]->batch->obj,
+                                                   0, sizeof(*cmd));
                        i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+                       intel_gt_chipset_flush(engine->gt);
                }
 
                i915_vma_put(request[idx]->batch);
@@ -1233,7 +1245,7 @@ static int live_parallel_engines(void *arg)
                struct igt_live_test t;
                unsigned int idx;
 
-               snprintf(name, sizeof(name), "%pS", fn);
+               snprintf(name, sizeof(name), "%ps", *fn);
                err = igt_live_test_begin(&t, i915, __func__, name);
                if (err)
                        break;
@@ -1470,3 +1482,572 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 
        return i915_subtests(tests, i915);
 }
+
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+       struct i915_request *rq;
+       struct dma_fence *fence;
+
+       rq = intel_engine_create_kernel_request(ce->engine);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       fence = i915_active_fence_get(&ce->timeline->last_request);
+       if (fence) {
+               i915_request_await_dma_fence(rq, fence);
+               dma_fence_put(fence);
+       }
+
+       rq = i915_request_get(rq);
+       i915_request_add(rq);
+       if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+               err = -ETIME;
+       i915_request_put(rq);
+
+       while (!err && !intel_engine_is_idle(ce->engine))
+               intel_engine_flush_submission(ce->engine);
+
+       return err;
+}
+
+struct perf_stats {
+       struct intel_engine_cs *engine;
+       unsigned long count;
+       ktime_t time;
+       ktime_t busy;
+       u64 runtime;
+};
+
+struct perf_series {
+       struct drm_i915_private *i915;
+       unsigned int nengines;
+       struct intel_context *ce[];
+};
+
+static int s_sync0(void *arg)
+{
+       struct perf_series *ps = arg;
+       IGT_TIMEOUT(end_time);
+       unsigned int idx = 0;
+       int err = 0;
+
+       GEM_BUG_ON(!ps->nengines);
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ps->ce[idx]);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               if (i915_request_wait(rq, 0, HZ / 5) < 0)
+                       err = -ETIME;
+               i915_request_put(rq);
+               if (err)
+                       break;
+
+               if (++idx == ps->nengines)
+                       idx = 0;
+       } while (!__igt_timeout(end_time, NULL));
+
+       return err;
+}
+
+static int s_sync1(void *arg)
+{
+       struct perf_series *ps = arg;
+       struct i915_request *prev = NULL;
+       IGT_TIMEOUT(end_time);
+       unsigned int idx = 0;
+       int err = 0;
+
+       GEM_BUG_ON(!ps->nengines);
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ps->ce[idx]);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+                       err = -ETIME;
+               i915_request_put(prev);
+               prev = rq;
+               if (err)
+                       break;
+
+               if (++idx == ps->nengines)
+                       idx = 0;
+       } while (!__igt_timeout(end_time, NULL));
+       i915_request_put(prev);
+
+       return err;
+}
+
+static int s_many(void *arg)
+{
+       struct perf_series *ps = arg;
+       IGT_TIMEOUT(end_time);
+       unsigned int idx = 0;
+
+       GEM_BUG_ON(!ps->nengines);
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ps->ce[idx]);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
+
+               i915_request_add(rq);
+
+               if (++idx == ps->nengines)
+                       idx = 0;
+       } while (!__igt_timeout(end_time, NULL));
+
+       return 0;
+}
+
+static int perf_series_engines(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       static int (* const func[])(void *arg) = {
+               s_sync0,
+               s_sync1,
+               s_many,
+               NULL,
+       };
+       const unsigned int nengines = num_uabi_engines(i915);
+       struct intel_engine_cs *engine;
+       int (* const *fn)(void *arg);
+       struct pm_qos_request qos;
+       struct perf_stats *stats;
+       struct perf_series *ps;
+       unsigned int idx;
+       int err = 0;
+
+       stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+       if (!stats)
+               return -ENOMEM;
+
+       ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+       if (!ps) {
+               kfree(stats);
+               return -ENOMEM;
+       }
+
+       cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+       ps->i915 = i915;
+       ps->nengines = nengines;
+
+       idx = 0;
+       for_each_uabi_engine(engine, i915) {
+               struct intel_context *ce;
+
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce))
+                       goto out;
+
+               err = intel_context_pin(ce);
+               if (err) {
+                       intel_context_put(ce);
+                       goto out;
+               }
+
+               ps->ce[idx++] = ce;
+       }
+       GEM_BUG_ON(idx != ps->nengines);
+
+       for (fn = func; *fn && !err; fn++) {
+               char name[KSYM_NAME_LEN];
+               struct igt_live_test t;
+
+               snprintf(name, sizeof(name), "%ps", *fn);
+               err = igt_live_test_begin(&t, i915, __func__, name);
+               if (err)
+                       break;
+
+               for (idx = 0; idx < nengines; idx++) {
+                       struct perf_stats *p =
+                               memset(&stats[idx], 0, sizeof(stats[idx]));
+                       struct intel_context *ce = ps->ce[idx];
+
+                       p->engine = ps->ce[idx]->engine;
+                       intel_engine_pm_get(p->engine);
+
+                       if (intel_engine_supports_stats(p->engine))
+                               p->busy = intel_engine_get_busy_time(p->engine) + 1;
+                       p->runtime = -intel_context_get_total_runtime_ns(ce);
+                       p->time = ktime_get();
+               }
+
+               err = (*fn)(ps);
+               if (igt_live_test_end(&t))
+                       err = -EIO;
+
+               for (idx = 0; idx < nengines; idx++) {
+                       struct perf_stats *p = &stats[idx];
+                       struct intel_context *ce = ps->ce[idx];
+                       int integer, decimal;
+                       u64 busy, dt;
+
+                       p->time = ktime_sub(ktime_get(), p->time);
+                       if (p->busy) {
+                               p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+                                                   p->busy - 1);
+                       }
+
+                       err = switch_to_kernel_sync(ce, err);
+                       p->runtime += intel_context_get_total_runtime_ns(ce);
+                       intel_engine_pm_put(p->engine);
+
+                       busy = 100 * ktime_to_ns(p->busy);
+                       dt = ktime_to_ns(p->time);
+                       if (dt) {
+                               integer = div64_u64(busy, dt);
+                               busy -= integer * dt;
+                               decimal = div64_u64(100 * busy, dt);
+                       } else {
+                               integer = 0;
+                               decimal = 0;
+                       }
+
+                       pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+                               name, p->engine->name, ce->timeline->seqno,
+                               integer, decimal,
+                               div_u64(p->runtime, 1000 * 1000),
+                               div_u64(ktime_to_ns(p->time), 1000 * 1000));
+               }
+       }
+
+out:
+       for (idx = 0; idx < nengines; idx++) {
+               if (IS_ERR_OR_NULL(ps->ce[idx]))
+                       break;
+
+               intel_context_unpin(ps->ce[idx]);
+               intel_context_put(ps->ce[idx]);
+       }
+       kfree(ps);
+
+       cpu_latency_qos_remove_request(&qos);
+       kfree(stats);
+       return err;
+}
+
+static int p_sync0(void *arg)
+{
+       struct perf_stats *p = arg;
+       struct intel_engine_cs *engine = p->engine;
+       struct intel_context *ce;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       bool busy;
+       int err = 0;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+               return err;
+       }
+
+       busy = false;
+       if (intel_engine_supports_stats(engine)) {
+               p->busy = intel_engine_get_busy_time(engine);
+               busy = true;
+       }
+
+       p->time = ktime_get();
+       count = 0;
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               err = 0;
+               if (i915_request_wait(rq, 0, HZ / 5) < 0)
+                       err = -ETIME;
+               i915_request_put(rq);
+               if (err)
+                       break;
+
+               count++;
+       } while (!__igt_timeout(end_time, NULL));
+       p->time = ktime_sub(ktime_get(), p->time);
+
+       if (busy) {
+               p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+                                   p->busy);
+       }
+
+       err = switch_to_kernel_sync(ce, err);
+       p->runtime = intel_context_get_total_runtime_ns(ce);
+       p->count = count;
+
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+       return err;
+}
+
+static int p_sync1(void *arg)
+{
+       struct perf_stats *p = arg;
+       struct intel_engine_cs *engine = p->engine;
+       struct i915_request *prev = NULL;
+       struct intel_context *ce;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       bool busy;
+       int err = 0;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+               return err;
+       }
+
+       busy = false;
+       if (intel_engine_supports_stats(engine)) {
+               p->busy = intel_engine_get_busy_time(engine);
+               busy = true;
+       }
+
+       p->time = ktime_get();
+       count = 0;
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               err = 0;
+               if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+                       err = -ETIME;
+               i915_request_put(prev);
+               prev = rq;
+               if (err)
+                       break;
+
+               count++;
+       } while (!__igt_timeout(end_time, NULL));
+       i915_request_put(prev);
+       p->time = ktime_sub(ktime_get(), p->time);
+
+       if (busy) {
+               p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+                                   p->busy);
+       }
+
+       err = switch_to_kernel_sync(ce, err);
+       p->runtime = intel_context_get_total_runtime_ns(ce);
+       p->count = count;
+
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+       return err;
+}
+
+static int p_many(void *arg)
+{
+       struct perf_stats *p = arg;
+       struct intel_engine_cs *engine = p->engine;
+       struct intel_context *ce;
+       IGT_TIMEOUT(end_time);
+       unsigned long count;
+       int err = 0;
+       bool busy;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+               return err;
+       }
+
+       busy = false;
+       if (intel_engine_supports_stats(engine)) {
+               p->busy = intel_engine_get_busy_time(engine);
+               busy = true;
+       }
+
+       count = 0;
+       p->time = ktime_get();
+       do {
+               struct i915_request *rq;
+
+               rq = i915_request_create(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       break;
+               }
+
+               i915_request_add(rq);
+               count++;
+       } while (!__igt_timeout(end_time, NULL));
+       p->time = ktime_sub(ktime_get(), p->time);
+
+       if (busy) {
+               p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+                                   p->busy);
+       }
+
+       err = switch_to_kernel_sync(ce, err);
+       p->runtime = intel_context_get_total_runtime_ns(ce);
+       p->count = count;
+
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+       return err;
+}
+
+static int perf_parallel_engines(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       static int (* const func[])(void *arg) = {
+               p_sync0,
+               p_sync1,
+               p_many,
+               NULL,
+       };
+       const unsigned int nengines = num_uabi_engines(i915);
+       struct intel_engine_cs *engine;
+       int (* const *fn)(void *arg);
+       struct pm_qos_request qos;
+       struct {
+               struct perf_stats p;
+               struct task_struct *tsk;
+       } *engines;
+       int err = 0;
+
+       engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+       if (!engines)
+               return -ENOMEM;
+
+       cpu_latency_qos_add_request(&qos, 0);
+
+       for (fn = func; *fn; fn++) {
+               char name[KSYM_NAME_LEN];
+               struct igt_live_test t;
+               unsigned int idx;
+
+               snprintf(name, sizeof(name), "%ps", *fn);
+               err = igt_live_test_begin(&t, i915, __func__, name);
+               if (err)
+                       break;
+
+               atomic_set(&i915->selftest.counter, nengines);
+
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+                       intel_engine_pm_get(engine);
+
+                       memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+                       engines[idx].p.engine = engine;
+
+                       engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+                                                      "igt:%s", engine->name);
+                       if (IS_ERR(engines[idx].tsk)) {
+                               err = PTR_ERR(engines[idx].tsk);
+                               intel_engine_pm_put(engine);
+                               break;
+                       }
+                       get_task_struct(engines[idx++].tsk);
+               }
+
+               yield(); /* start all threads before we kthread_stop() */
+
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+                       int status;
+
+                       if (IS_ERR(engines[idx].tsk))
+                               break;
+
+                       status = kthread_stop(engines[idx].tsk);
+                       if (status && !err)
+                               err = status;
+
+                       intel_engine_pm_put(engine);
+                       put_task_struct(engines[idx++].tsk);
+               }
+
+               if (igt_live_test_end(&t))
+                       err = -EIO;
+               if (err)
+                       break;
+
+               idx = 0;
+               for_each_uabi_engine(engine, i915) {
+                       struct perf_stats *p = &engines[idx].p;
+                       u64 busy = 100 * ktime_to_ns(p->busy);
+                       u64 dt = ktime_to_ns(p->time);
+                       int integer, decimal;
+
+                       if (dt) {
+                               integer = div64_u64(busy, dt);
+                               busy -= integer * dt;
+                               decimal = div64_u64(100 * busy, dt);
+                       } else {
+                               integer = 0;
+                               decimal = 0;
+                       }
+
+                       GEM_BUG_ON(engine != p->engine);
+                       pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+                               name, engine->name, p->count, integer, decimal,
+                               div_u64(p->runtime, 1000 * 1000),
+                               div_u64(ktime_to_ns(p->time), 1000 * 1000));
+                       idx++;
+               }
+       }
+
+       cpu_latency_qos_remove_request(&qos);
+       kfree(engines);
+       return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(perf_series_engines),
+               SUBTEST(perf_parallel_engines),
+       };
+
+       if (intel_gt_is_wedged(&i915->gt))
+               return 0;
+
+       return i915_subtests(tests, i915);
+}
index d3bf9eefb6827ef53adfeac60c90c7cf75a4bdd9..1bc11c09faef53182e66d3ab2900dc7317e2e137 100644 (file)
@@ -396,6 +396,35 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
        return true;
 }
 
+void igt_hexdump(const void *buf, size_t len)
+{
+       const size_t rowsize = 8 * sizeof(u32);
+       const void *prev = NULL;
+       bool skip = false;
+       size_t pos;
+
+       for (pos = 0; pos < len; pos += rowsize) {
+               char line[128];
+
+               if (prev && !memcmp(prev, buf + pos, rowsize)) {
+                       if (!skip) {
+                               pr_info("*\n");
+                               skip = true;
+                       }
+                       continue;
+               }
+
+               WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+                                               rowsize, sizeof(u32),
+                                               line, sizeof(line),
+                                               false) >= sizeof(line));
+               pr_info("[%04zx] %s\n", pos, line);
+
+               prev = buf + pos;
+               skip = false;
+       }
+}
+
 module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
 module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
 module_param_named(st_filter, i915_selftest.filter, charp, 0400);
index 9ad4ab088466daa1a5fad20c34f7eee6b9c5c77e..e35ba5f9e73f662c1b35af5bade8eaa565706308 100644 (file)
@@ -169,8 +169,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
        intel_gt_chipset_flush(engine->gt);
 
-       if (engine->emit_init_breadcrumb &&
-           i915_request_timeline(rq)->has_initial_breadcrumb) {
+       if (engine->emit_init_breadcrumb) {
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto cancel_rq;
index 2a1d4ba1f9f3a798d4e568e4e356144a755a4a5e..6e80d99048e4bd363e04abf493336fd2cd9d11ec 100644 (file)
@@ -594,8 +594,11 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
        void *addr;
 
        obj = i915_gem_object_create_region(mr, size, 0);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj)) {
+               if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
+                       return ERR_PTR(-ENODEV);
                return obj;
+       }
 
        addr = i915_gem_object_pin_map(obj, type);
        if (IS_ERR(addr)) {
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
new file mode 100644 (file)
index 0000000..58710ac
--- /dev/null
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <asm/msr.h>
+
+#include "librapl.h"
+
+u64 librapl_energy_uJ(void)
+{
+       unsigned long long power;
+       u32 units;
+
+       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+               return 0;
+
+       units = (power & 0x1f00) >> 8;
+
+       if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+               return 0;
+
+       return (1000000 * power) >> units; /* convert to uJ */
+}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
new file mode 100644 (file)
index 0000000..887f3e9
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_LIBRAPL_H
+#define SELFTEST_LIBRAPL_H
+
+#include <linux/types.h>
+
+u64 librapl_energy_uJ(void);
+
+#endif /* SELFTEST_LIBRAPL_H */
index 754d0eb6beaae1e8decd2034b877b28c31a9f4a1..9b105b811f1f4514a9c5142a0eea05c2919bd78f 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_managed.h>
+
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/mock_engine.h"
@@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev)
 {
        struct drm_i915_private *i915 = to_i915(dev);
 
+       if (!i915->do_release)
+               goto out;
+
        mock_device_flush(i915);
        intel_gt_driver_remove(&i915->gt);
 
@@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev)
 
        drm_mode_config_cleanup(&i915->drm);
 
-       drm_dev_fini(&i915->drm);
+out:
        put_device(&i915->drm.pdev->dev);
+       i915->drm.pdev = NULL;
 }
 
 static struct drm_driver mock_driver = {
@@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void)
        struct pci_dev *pdev;
        int err;
 
-       pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+       pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
-               goto err;
+               return NULL;
+       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+       if (!i915) {
+               kfree(pdev);
+               return NULL;
+       }
 
        device_initialize(&pdev->dev);
        pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
@@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void)
        pdev->dev.archdata.iommu = (void *)-1;
 #endif
 
-       i915 = (struct drm_i915_private *)(pdev + 1);
        pci_set_drvdata(pdev, i915);
 
        dev_pm_domain_set(&pdev->dev, &pm_domain);
@@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void)
        err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
        if (err) {
                pr_err("Failed to initialise mock GEM device: err=%d\n", err);
-               goto put_device;
+               put_device(&pdev->dev);
+               kfree(i915);
+
+               return NULL;
        }
        i915->drm.pdev = pdev;
+       drmm_add_final_kfree(&i915->drm, i915);
 
        intel_runtime_pm_init_early(&i915->runtime_pm);
 
@@ -178,16 +192,18 @@ struct drm_i915_private *mock_gem_device(void)
 
        mkwrite_device_info(i915)->engine_mask = BIT(0);
 
-       i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
-       if (!i915->engine[RCS0])
+       i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
+       if (!i915->gt.engine[RCS0])
                goto err_unlock;
 
-       if (mock_engine_init(i915->engine[RCS0]))
+       if (mock_engine_init(i915->gt.engine[RCS0]))
                goto err_context;
 
        __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
        intel_engines_driver_register(i915);
 
+       i915->do_release = true;
+
        return i915;
 
 err_context:
@@ -198,9 +214,7 @@ err_drv:
        intel_gt_driver_late_release(&i915->gt);
        intel_memory_regions_driver_release(i915);
        drm_mode_config_cleanup(&i915->drm);
-       drm_dev_fini(&i915->drm);
-put_device:
-       put_device(&pdev->dev);
-err:
+       drm_dev_put(&i915->drm);
+
        return NULL;
 }
index f22cfbf9353ede21121809335751130a9777217a..ba4ca17fd4d8507186261c2abffeb504fe165cc3 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs =
        .atomic_check = dw_hdmi_imx_atomic_check,
 };
 
-static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_mode_status
 imx6q_hdmi_mode_valid(struct drm_connector *con,
                      const struct drm_display_mode *mode)
@@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
                return ret;
 
        drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        platform_set_drvdata(pdev, hdmi);
 
index da87c70e413b4d6d821b991621563c3edf618fc0..2e38f1a5cf8da6a195a9a6ed85f9483960a69953 100644 (file)
@@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector)
 }
 EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
 
-void imx_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-
 static int imx_drm_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
 {
@@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
 
        encoder->possible_crtcs = crtc_mask;
 
-       /* FIXME: this is the mask of outputs which can clone this output. */
-       encoder->possible_clones = ~0;
+       /* FIXME: cloning support not clear, disable it all for now */
+       encoder->possible_clones = 0;
 
        return 0;
 }
index ab9c6f706eb3d8b87db9ac4bac336bf7cd3c9cd9..c3e1a3f14d30cd00de6bdb9352a9d29f90c853ac 100644 (file)
@@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
        struct drm_encoder *encoder, struct device_node *np);
 
 void imx_drm_connector_destroy(struct drm_connector *connector);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder);
 
 int ipu_planes_assign_pre(struct drm_device *dev,
                          struct drm_atomic_state *state);
index 4da22a94790cfca5059de6ab720067154e3cd9ba..66ea68e8da8757d23c33f345280e6c6c60f6beee 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs =
        .best_encoder = imx_ldb_connector_best_encoder,
 };
 
-static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
        .atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
        .enable = imx_ldb_encoder_enable,
@@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm,
        }
 
        drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
 
        if (imx_ldb_ch->bridge) {
                ret = drm_bridge_attach(&imx_ldb_ch->encoder,
index 5bbfaa2cd0f47af15f0594f5001857ed5440dcbb..ee63782c77e9cc12db78c643e5f32dd7d7954470 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs =
        .mode_valid = imx_tve_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
        .mode_set = imx_tve_encoder_mode_set,
        .enable = imx_tve_encoder_enable,
@@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
                return ret;
 
        drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
-       drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
-                        encoder_type, NULL);
+       drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
 
        drm_connector_helper_add(&tve->connector,
                        &imx_tve_connector_helper_funcs);
index 08fafa4bf8c21302d8d0e464081f6b29281f0291..ac916c84a63185ef584569ab5bba2c599d96c95d 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "imx-drm.h"
 
@@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
        .best_encoder = imx_pd_connector_best_encoder,
 };
 
-static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
-       .destroy = imx_drm_encoder_destroy,
-};
-
 static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
        .enable = imx_pd_bridge_enable,
        .disable = imx_pd_bridge_disable,
@@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm,
         */
        imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
-                        DRM_MODE_ENCODER_NONE, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
 
        imxpd->bridge.funcs = &imx_pd_bridge_funcs;
        drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
index 548cc25ea4abed9565f1eebed9af9aa92b908a58..55b49a31729bf180b924cd742a810f355d8e8039 100644 (file)
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_plane.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 
 #define JZ_REG_LCD_CFG                         0x00
@@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static void ingenic_drm_release(struct drm_device *drm)
-{
-       struct ingenic_drm *priv = drm_device_get_priv(drm);
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(priv);
-}
-
 static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
 {
        struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -540,7 +533,6 @@ static struct drm_driver ingenic_drm_driver_data = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
 
        .irq_handler            = ingenic_drm_irq_handler,
-       .release                = ingenic_drm_release,
 };
 
 static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -592,10 +584,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
        .atomic_commit          = drm_atomic_helper_commit,
 };
 
-static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = {
-       .destroy                = drm_encoder_cleanup,
-};
-
 static void ingenic_drm_free_dma_hwdesc(void *d)
 {
        struct ingenic_drm *priv = d;
@@ -623,24 +611,21 @@ static int ingenic_drm_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
+                                 struct ingenic_drm, drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        priv->soc_info = soc_info;
        priv->dev = dev;
        drm = &priv->drm;
-       drm->dev_private = priv;
 
        platform_set_drvdata(pdev, priv);
 
-       ret = devm_drm_dev_init(dev, drm, &ingenic_drm_driver_data);
-       if (ret) {
-               kfree(priv);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
                return ret;
-       }
 
-       drm_mode_config_init(drm);
        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
        drm->mode_config.max_width = soc_info->max_width;
@@ -661,10 +646,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "Failed to get platform irq");
+       if (irq < 0)
                return irq;
-       }
 
        if (soc_info->needs_dev_clk) {
                priv->lcd_clk = devm_clk_get(dev, "lcd");
@@ -730,8 +713,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
        drm_encoder_helper_add(&priv->encoder,
                               &ingenic_drm_encoder_helper_funcs);
 
-       ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs,
-                              DRM_MODE_ENCODER_DPI, NULL);
+       ret = drm_simple_encoder_init(drm, &priv->encoder,
+                                     DRM_MODE_ENCODER_DPI);
        if (ret) {
                dev_err(dev, "Failed to init encoder: %i", ret);
                return ret;
@@ -791,9 +774,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
                goto err_devclk_disable;
        }
 
-       ret = drm_fbdev_generic_setup(drm, 32);
-       if (ret)
-               dev_warn(dev, "Unable to start fbdev emulation: %i", ret);
+       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
index d589f09d04d97f1ee80417764b47b2befa9095f4..fa1d4f5df31e86f4021d61950cef445c845a1c17 100644 (file)
@@ -10,5 +10,7 @@ config DRM_LIMA
        depends on OF
        select DRM_SCHED
        select DRM_GEM_SHMEM_HELPER
+       select PM_DEVFREQ
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
        help
         DRM driver for ARM Mali 400/450 GPUs.
index a85444b0a1d458594c48ada319c914a7b8d01d79..ca2097b8e1ad0518022ea772e8ebce2ec691e7f8 100644 (file)
@@ -14,6 +14,8 @@ lima-y := \
        lima_sched.o \
        lima_ctx.o \
        lima_dlbu.o \
-       lima_bcast.o
+       lima_bcast.o \
+       lima_trace.o \
+       lima_devfreq.o
 
 obj-$(CONFIG_DRM_LIMA) += lima.o
index 288398027bfa81f4874290d35b95cf9d794eac81..fbc43f243c54d23721837c0f50477c6bdd6a606a 100644 (file)
@@ -26,18 +26,33 @@ void lima_bcast_enable(struct lima_device *dev, int num_pp)
        bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
 }
 
+static int lima_bcast_hw_init(struct lima_ip *ip)
+{
+       bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16);
+       bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask);
+       return 0;
+}
+
+int lima_bcast_resume(struct lima_ip *ip)
+{
+       return lima_bcast_hw_init(ip);
+}
+
+void lima_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_bcast_init(struct lima_ip *ip)
 {
-       int i, mask = 0;
+       int i;
 
        for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
                if (ip->dev->ip[i].present)
-                       mask |= 1 << (i - lima_ip_pp0);
+                       ip->data.mask |= 1 << (i - lima_ip_pp0);
        }
 
-       bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
-       bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
-       return 0;
+       return lima_bcast_hw_init(ip);
 }
 
 void lima_bcast_fini(struct lima_ip *ip)
index c47e58563d0a208e539a4190a4e6a832e6ce6da8..465ee587bceb2f22b3552f8bb7b01338a74990bf 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_bcast_resume(struct lima_ip *ip);
+void lima_bcast_suspend(struct lima_ip *ip);
 int lima_bcast_init(struct lima_ip *ip);
 void lima_bcast_fini(struct lima_ip *ip);
 
index 22fff6caa961bf3fb5801d0d95975ec158a4a40e..891d5cd5019a7451200de317e9c9887b34d9377a 100644 (file)
@@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
        if (err < 0)
                goto err_out0;
 
+       ctx->pid = task_pid_nr(current);
+       get_task_comm(ctx->pname, current);
+
        return 0;
 
 err_out0:
index 6154e5c9bfe4971ca094b4fe91beb988839cbd55..74e2be09090f65ef24872c6c534ff0a419c17e88 100644 (file)
@@ -5,6 +5,7 @@
 #define __LIMA_CTX_H__
 
 #include <linux/xarray.h>
+#include <linux/sched.h>
 
 #include "lima_device.h"
 
@@ -13,6 +14,10 @@ struct lima_ctx {
        struct lima_device *dev;
        struct lima_sched_context context[lima_pipe_num];
        atomic_t guilty;
+
+       /* debug info */
+       char pname[TASK_COMM_LEN];
+       pid_t pid;
 };
 
 struct lima_ctx_mgr {
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
new file mode 100644 (file)
index 0000000..bbe0281
--- /dev/null
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on panfrost_devfreq.c:
+ *   Copyright 2019 Collabora ltd.
+ */
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+
+#include "lima_device.h"
+#include "lima_devfreq.h"
+
+static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
+{
+       ktime_t now, last;
+
+       now = ktime_get();
+       last = devfreq->time_last_update;
+
+       if (devfreq->busy_count > 0)
+               devfreq->busy_time += ktime_sub(now, last);
+       else
+               devfreq->idle_time += ktime_sub(now, last);
+
+       devfreq->time_last_update = now;
+}
+
+static int lima_devfreq_target(struct device *dev, unsigned long *freq,
+                              u32 flags)
+{
+       struct dev_pm_opp *opp;
+       int err;
+
+       opp = devfreq_recommended_opp(dev, freq, flags);
+       if (IS_ERR(opp))
+               return PTR_ERR(opp);
+       dev_pm_opp_put(opp);
+
+       err = dev_pm_opp_set_rate(dev, *freq);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static void lima_devfreq_reset(struct lima_devfreq *devfreq)
+{
+       devfreq->busy_time = 0;
+       devfreq->idle_time = 0;
+       devfreq->time_last_update = ktime_get();
+}
+
+static int lima_devfreq_get_dev_status(struct device *dev,
+                                      struct devfreq_dev_status *status)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_devfreq *devfreq = &ldev->devfreq;
+       unsigned long irqflags;
+
+       status->current_frequency = clk_get_rate(ldev->clk_gpu);
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_update_utilization(devfreq);
+
+       status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
+                                                  devfreq->idle_time));
+       status->busy_time = ktime_to_ns(devfreq->busy_time);
+
+       lima_devfreq_reset(devfreq);
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+       dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+               status->busy_time, status->total_time,
+               status->busy_time / (status->total_time / 100),
+               status->current_frequency / 1000 / 1000);
+
+       return 0;
+}
+
+static struct devfreq_dev_profile lima_devfreq_profile = {
+       .polling_ms = 50, /* ~3 frames */
+       .target = lima_devfreq_target,
+       .get_dev_status = lima_devfreq_get_dev_status,
+};
+
+void lima_devfreq_fini(struct lima_device *ldev)
+{
+       struct lima_devfreq *devfreq = &ldev->devfreq;
+
+       if (devfreq->cooling) {
+               devfreq_cooling_unregister(devfreq->cooling);
+               devfreq->cooling = NULL;
+       }
+
+       if (devfreq->devfreq) {
+               devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
+               devfreq->devfreq = NULL;
+       }
+
+       if (devfreq->opp_of_table_added) {
+               dev_pm_opp_of_remove_table(ldev->dev);
+               devfreq->opp_of_table_added = false;
+       }
+
+       if (devfreq->regulators_opp_table) {
+               dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+               devfreq->regulators_opp_table = NULL;
+       }
+
+       if (devfreq->clkname_opp_table) {
+               dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+               devfreq->clkname_opp_table = NULL;
+       }
+}
+
+int lima_devfreq_init(struct lima_device *ldev)
+{
+       struct thermal_cooling_device *cooling;
+       struct device *dev = ldev->dev;
+       struct opp_table *opp_table;
+       struct devfreq *devfreq;
+       struct lima_devfreq *ldevfreq = &ldev->devfreq;
+       struct dev_pm_opp *opp;
+       unsigned long cur_freq;
+       int ret;
+
+       if (!device_property_present(dev, "operating-points-v2"))
+               /* Optional, continue without devfreq */
+               return 0;
+
+       spin_lock_init(&ldevfreq->lock);
+
+       opp_table = dev_pm_opp_set_clkname(dev, "core");
+       if (IS_ERR(opp_table)) {
+               ret = PTR_ERR(opp_table);
+               goto err_fini;
+       }
+
+       ldevfreq->clkname_opp_table = opp_table;
+
+       opp_table = dev_pm_opp_set_regulators(dev,
+                                             (const char *[]){ "mali" },
+                                             1);
+       if (IS_ERR(opp_table)) {
+               ret = PTR_ERR(opp_table);
+
+               /* Continue if the optional regulator is missing */
+               if (ret != -ENODEV)
+                       goto err_fini;
+       } else {
+               ldevfreq->regulators_opp_table = opp_table;
+       }
+
+       ret = dev_pm_opp_of_add_table(dev);
+       if (ret)
+               goto err_fini;
+       ldevfreq->opp_of_table_added = true;
+
+       lima_devfreq_reset(ldevfreq);
+
+       cur_freq = clk_get_rate(ldev->clk_gpu);
+
+       opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+       if (IS_ERR(opp)) {
+               ret = PTR_ERR(opp);
+               goto err_fini;
+       }
+
+       lima_devfreq_profile.initial_freq = cur_freq;
+       dev_pm_opp_put(opp);
+
+       devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
+                                         DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+       if (IS_ERR(devfreq)) {
+               dev_err(dev, "Couldn't initialize GPU devfreq\n");
+               ret = PTR_ERR(devfreq);
+               goto err_fini;
+       }
+
+       ldevfreq->devfreq = devfreq;
+
+       cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+       if (IS_ERR(cooling))
+               dev_info(dev, "Failed to register cooling device\n");
+       else
+               ldevfreq->cooling = cooling;
+
+       return 0;
+
+err_fini:
+       lima_devfreq_fini(ldev);
+       return ret;
+}
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq)
+               return;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_update_utilization(devfreq);
+
+       devfreq->busy_count++;
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq)
+               return;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_update_utilization(devfreq);
+
+       WARN_ON(--devfreq->busy_count < 0);
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq)
+{
+       unsigned long irqflags;
+
+       if (!devfreq->devfreq)
+               return 0;
+
+       spin_lock_irqsave(&devfreq->lock, irqflags);
+
+       lima_devfreq_reset(devfreq);
+
+       spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+       return devfreq_resume_device(devfreq->devfreq);
+}
+
+int lima_devfreq_suspend(struct lima_devfreq *devfreq)
+{
+       if (!devfreq->devfreq)
+               return 0;
+
+       return devfreq_suspend_device(devfreq->devfreq);
+}
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
new file mode 100644 (file)
index 0000000..5eed297
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */
+
+#ifndef __LIMA_DEVFREQ_H__
+#define __LIMA_DEVFREQ_H__
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct lima_device;
+
+struct lima_devfreq {
+       struct devfreq *devfreq;
+       struct opp_table *clkname_opp_table;
+       struct opp_table *regulators_opp_table;
+       struct thermal_cooling_device *cooling;
+       bool opp_of_table_added;
+
+       ktime_t busy_time;
+       ktime_t idle_time;
+       ktime_t time_last_update;
+       int busy_count;
+       /*
+        * Protect busy_time, idle_time, time_last_update and busy_count
+        * because these can be updated concurrently, for example by the GP
+        * and PP interrupts.
+        */
+       spinlock_t lock;
+};
+
+int lima_devfreq_init(struct lima_device *ldev);
+void lima_devfreq_fini(struct lima_device *ldev);
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq);
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq);
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq);
+int lima_devfreq_suspend(struct lima_devfreq *devfreq);
+
+#endif
index 19829b5430242f64d3fb88f6eb88548da9e5faf9..65fdca366e41f00d940f13a0865234fc47df5595 100644 (file)
@@ -25,6 +25,8 @@ struct lima_ip_desc {
 
        int (*init)(struct lima_ip *ip);
        void (*fini)(struct lima_ip *ip);
+       int (*resume)(struct lima_ip *ip);
+       void (*suspend)(struct lima_ip *ip);
 };
 
 #define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
@@ -41,6 +43,8 @@ struct lima_ip_desc {
                }, \
                .init = lima_##func##_init, \
                .fini = lima_##func##_fini, \
+               .resume = lima_##func##_resume, \
+               .suspend = lima_##func##_suspend, \
        }
 
 static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
@@ -77,26 +81,10 @@ const char *lima_ip_name(struct lima_ip *ip)
        return lima_ip_desc[ip->id].name;
 }
 
-static int lima_clk_init(struct lima_device *dev)
+static int lima_clk_enable(struct lima_device *dev)
 {
        int err;
 
-       dev->clk_bus = devm_clk_get(dev->dev, "bus");
-       if (IS_ERR(dev->clk_bus)) {
-               err = PTR_ERR(dev->clk_bus);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get bus clk failed %d\n", err);
-               return err;
-       }
-
-       dev->clk_gpu = devm_clk_get(dev->dev, "core");
-       if (IS_ERR(dev->clk_gpu)) {
-               err = PTR_ERR(dev->clk_gpu);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get core clk failed %d\n", err);
-               return err;
-       }
-
        err = clk_prepare_enable(dev->clk_bus);
        if (err)
                return err;
@@ -105,15 +93,7 @@ static int lima_clk_init(struct lima_device *dev)
        if (err)
                goto error_out0;
 
-       dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
-
-       if (IS_ERR(dev->reset)) {
-               err = PTR_ERR(dev->reset);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev->dev, "get reset controller failed %d\n",
-                               err);
-               goto error_out1;
-       } else if (dev->reset != NULL) {
+       if (dev->reset) {
                err = reset_control_deassert(dev->reset);
                if (err) {
                        dev_err(dev->dev,
@@ -131,14 +111,76 @@ error_out0:
        return err;
 }
 
-static void lima_clk_fini(struct lima_device *dev)
+static void lima_clk_disable(struct lima_device *dev)
 {
-       if (dev->reset != NULL)
+       if (dev->reset)
                reset_control_assert(dev->reset);
        clk_disable_unprepare(dev->clk_gpu);
        clk_disable_unprepare(dev->clk_bus);
 }
 
+static int lima_clk_init(struct lima_device *dev)
+{
+       int err;
+
+       dev->clk_bus = devm_clk_get(dev->dev, "bus");
+       if (IS_ERR(dev->clk_bus)) {
+               err = PTR_ERR(dev->clk_bus);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev->dev, "get bus clk failed %d\n", err);
+               dev->clk_bus = NULL;
+               return err;
+       }
+
+       dev->clk_gpu = devm_clk_get(dev->dev, "core");
+       if (IS_ERR(dev->clk_gpu)) {
+               err = PTR_ERR(dev->clk_gpu);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev->dev, "get core clk failed %d\n", err);
+               dev->clk_gpu = NULL;
+               return err;
+       }
+
+       dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+       if (IS_ERR(dev->reset)) {
+               err = PTR_ERR(dev->reset);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev->dev, "get reset controller failed %d\n",
+                               err);
+               dev->reset = NULL;
+               return err;
+       }
+
+       return lima_clk_enable(dev);
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+       lima_clk_disable(dev);
+}
+
+static int lima_regulator_enable(struct lima_device *dev)
+{
+       int ret;
+
+       if (!dev->regulator)
+               return 0;
+
+       ret = regulator_enable(dev->regulator);
+       if (ret < 0) {
+               dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void lima_regulator_disable(struct lima_device *dev)
+{
+       if (dev->regulator)
+               regulator_disable(dev->regulator);
+}
+
 static int lima_regulator_init(struct lima_device *dev)
 {
        int ret;
@@ -154,25 +196,20 @@ static int lima_regulator_init(struct lima_device *dev)
                return ret;
        }
 
-       ret = regulator_enable(dev->regulator);
-       if (ret < 0) {
-               dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
+       return lima_regulator_enable(dev);
 }
 
 static void lima_regulator_fini(struct lima_device *dev)
 {
-       if (dev->regulator)
-               regulator_disable(dev->regulator);
+       lima_regulator_disable(dev);
 }
 
 static int lima_init_ip(struct lima_device *dev, int index)
 {
+       struct platform_device *pdev = to_platform_device(dev->dev);
        struct lima_ip_desc *desc = lima_ip_desc + index;
        struct lima_ip *ip = dev->ip + index;
+       const char *irq_name = desc->irq_name;
        int offset = desc->offset[dev->id];
        bool must = desc->must_have[dev->id];
        int err;
@@ -183,8 +220,9 @@ static int lima_init_ip(struct lima_device *dev, int index)
        ip->dev = dev;
        ip->id = index;
        ip->iomem = dev->iomem + offset;
-       if (desc->irq_name) {
-               err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+       if (irq_name) {
+               err = must ? platform_get_irq_byname(pdev, irq_name) :
+                            platform_get_irq_byname_optional(pdev, irq_name);
                if (err < 0)
                        goto out;
                ip->irq = err;
@@ -209,11 +247,34 @@ static void lima_fini_ip(struct lima_device *ldev, int index)
                desc->fini(ip);
 }
 
+static int lima_resume_ip(struct lima_device *ldev, int index)
+{
+       struct lima_ip_desc *desc = lima_ip_desc + index;
+       struct lima_ip *ip = ldev->ip + index;
+       int ret = 0;
+
+       if (ip->present)
+               ret = desc->resume(ip);
+
+       return ret;
+}
+
+static void lima_suspend_ip(struct lima_device *ldev, int index)
+{
+       struct lima_ip_desc *desc = lima_ip_desc + index;
+       struct lima_ip *ip = ldev->ip + index;
+
+       if (ip->present)
+               desc->suspend(ip);
+}
+
 static int lima_init_gp_pipe(struct lima_device *dev)
 {
        struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
        int err;
 
+       pipe->ldev = dev;
+
        err = lima_sched_pipe_init(pipe, "gp");
        if (err)
                return err;
@@ -244,6 +305,8 @@ static int lima_init_pp_pipe(struct lima_device *dev)
        struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
        int err, i;
 
+       pipe->ldev = dev;
+
        err = lima_sched_pipe_init(pipe, "pp");
        if (err)
                return err;
@@ -290,8 +353,8 @@ static void lima_fini_pp_pipe(struct lima_device *dev)
 
 int lima_device_init(struct lima_device *ldev)
 {
+       struct platform_device *pdev = to_platform_device(ldev->dev);
        int err, i;
-       struct resource *res;
 
        dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
 
@@ -322,8 +385,7 @@ int lima_device_init(struct lima_device *ldev)
        } else
                ldev->va_end = LIMA_VA_RESERVE_END;
 
-       res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
-       ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+       ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ldev->iomem)) {
                dev_err(ldev->dev, "fail to ioremap iomem\n");
                err = PTR_ERR(ldev->iomem);
@@ -344,6 +406,12 @@ int lima_device_init(struct lima_device *ldev)
        if (err)
                goto err_out5;
 
+       ldev->dump.magic = LIMA_DUMP_MAGIC;
+       ldev->dump.version_major = LIMA_DUMP_MAJOR;
+       ldev->dump.version_minor = LIMA_DUMP_MINOR;
+       INIT_LIST_HEAD(&ldev->error_task_list);
+       mutex_init(&ldev->error_task_list_lock);
+
        dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
        dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
 
@@ -370,6 +438,13 @@ err_out0:
 void lima_device_fini(struct lima_device *ldev)
 {
        int i;
+       struct lima_sched_error_task *et, *tmp;
+
+       list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+               list_del(&et->list);
+               kvfree(et);
+       }
+       mutex_destroy(&ldev->error_task_list_lock);
 
        lima_fini_pp_pipe(ldev);
        lima_fini_gp_pipe(ldev);
@@ -387,3 +462,72 @@ void lima_device_fini(struct lima_device *ldev)
 
        lima_clk_fini(ldev);
 }
+
+int lima_device_resume(struct device *dev)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       int i, err;
+
+       err = lima_clk_enable(ldev);
+       if (err) {
+               dev_err(dev, "resume clk fail %d\n", err);
+               return err;
+       }
+
+       err = lima_regulator_enable(ldev);
+       if (err) {
+               dev_err(dev, "resume regulator fail %d\n", err);
+               goto err_out0;
+       }
+
+       for (i = 0; i < lima_ip_num; i++) {
+               err = lima_resume_ip(ldev, i);
+               if (err) {
+                       dev_err(dev, "resume ip %d fail\n", i);
+                       goto err_out1;
+               }
+       }
+
+       err = lima_devfreq_resume(&ldev->devfreq);
+       if (err) {
+               dev_err(dev, "devfreq resume fail\n");
+               goto err_out1;
+       }
+
+       return 0;
+
+err_out1:
+       while (--i >= 0)
+               lima_suspend_ip(ldev, i);
+       lima_regulator_disable(ldev);
+err_out0:
+       lima_clk_disable(ldev);
+       return err;
+}
+
+int lima_device_suspend(struct device *dev)
+{
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       int i, err;
+
+       /* check any task running */
+       for (i = 0; i < lima_pipe_num; i++) {
+               if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+                       return -EBUSY;
+       }
+
+       err = lima_devfreq_suspend(&ldev->devfreq);
+       if (err) {
+               dev_err(dev, "devfreq suspend fail\n");
+               return err;
+       }
+
+       for (i = lima_ip_num - 1; i >= 0; i--)
+               lima_suspend_ip(ldev, i);
+
+       lima_regulator_disable(ldev);
+
+       lima_clk_disable(ldev);
+
+       return 0;
+}
index 31158d86271c2b326b0c9b9e7a8917235e6b80ea..41b9d7b4bcc7a0128adda97379b6048735310096 100644 (file)
@@ -6,8 +6,12 @@
 
 #include <drm/drm_device.h>
 #include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 
 #include "lima_sched.h"
+#include "lima_dump.h"
+#include "lima_devfreq.h"
 
 enum lima_gpu_id {
        lima_gpu_mali400 = 0,
@@ -60,6 +64,8 @@ struct lima_ip {
                bool async_reset;
                /* l2 cache */
                spinlock_t lock;
+               /* pmu/bcast */
+               u32 mask;
        } data;
 };
 
@@ -72,7 +78,6 @@ enum lima_pipe_id {
 struct lima_device {
        struct device *dev;
        struct drm_device *ddev;
-       struct platform_device *pdev;
 
        enum lima_gpu_id id;
        u32 gp_version;
@@ -94,6 +99,13 @@ struct lima_device {
 
        u32 *dlbu_cpu;
        dma_addr_t dlbu_dma;
+
+       struct lima_devfreq devfreq;
+
+       /* debug info */
+       struct lima_dump_head dump;
+       struct list_head error_task_list;
+       struct mutex error_task_list_lock;
 };
 
 static inline struct lima_device *
@@ -128,4 +140,7 @@ static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
        return 0;
 }
 
+int lima_device_suspend(struct device *dev);
+int lima_device_resume(struct device *dev);
+
 #endif
index 8399ceffb94bb8540ee4640ac4114bd652336eea..c1d5ea35daa7aecbea830adf7f946bc644c5166d 100644 (file)
@@ -42,7 +42,7 @@ void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
        dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
 }
 
-int lima_dlbu_init(struct lima_ip *ip)
+static int lima_dlbu_hw_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
 
@@ -52,6 +52,21 @@ int lima_dlbu_init(struct lima_ip *ip)
        return 0;
 }
 
+int lima_dlbu_resume(struct lima_ip *ip)
+{
+       return lima_dlbu_hw_init(ip);
+}
+
+void lima_dlbu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+       return lima_dlbu_hw_init(ip);
+}
+
 void lima_dlbu_fini(struct lima_ip *ip)
 {
 
index 16f877984466d4eb7849e7b076e8a62075862517..be71daaaee892f46a777995da6bbf387cf657321 100644 (file)
@@ -12,6 +12,8 @@ void lima_dlbu_disable(struct lima_device *dev);
 
 void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
 
+int lima_dlbu_resume(struct lima_ip *ip);
+void lima_dlbu_suspend(struct lima_ip *ip);
 int lima_dlbu_init(struct lima_ip *ip);
 void lima_dlbu_fini(struct lima_ip *ip);
 
index 2daac64d8955c6d43d283fe14338d84db33bbed2..a831565af81345a9f8959b01704fadd7c17aa5ec 100644 (file)
@@ -5,17 +5,20 @@
 #include <linux/of_platform.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_prime.h>
 #include <drm/lima_drm.h>
 
+#include "lima_device.h"
 #include "lima_drv.h"
 #include "lima_gem.h"
 #include "lima_vm.h"
 
 int lima_sched_timeout_ms;
 uint lima_heap_init_nr_pages = 8;
+uint lima_max_error_tasks;
 
 MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
 module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
@@ -23,6 +26,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
 MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
 module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
 
+MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
+module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);
+
 static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
 {
        struct drm_lima_get_param *args = data;
@@ -272,6 +278,93 @@ static struct drm_driver lima_drm_driver = {
        .gem_prime_mmap = drm_gem_prime_mmap,
 };
 
+struct lima_block_reader {
+       void *dst;
+       size_t base;
+       size_t count;
+       size_t off;
+       ssize_t read;
+};
+
+static bool lima_read_block(struct lima_block_reader *reader,
+                           void *src, size_t src_size)
+{
+       size_t max_off = reader->base + src_size;
+
+       if (reader->off < max_off) {
+               size_t size = min_t(size_t, max_off - reader->off,
+                                   reader->count);
+
+               memcpy(reader->dst, src + (reader->off - reader->base), size);
+
+               reader->dst += size;
+               reader->off += size;
+               reader->read += size;
+               reader->count -= size;
+       }
+
+       reader->base = max_off;
+
+       return !!reader->count;
+}
+
+static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t off, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_sched_error_task *et;
+       struct lima_block_reader reader = {
+               .dst = buf,
+               .count = count,
+               .off = off,
+       };
+
+       mutex_lock(&ldev->error_task_list_lock);
+
+       if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
+               list_for_each_entry(et, &ldev->error_task_list, list) {
+                       if (!lima_read_block(&reader, et->data, et->size))
+                               break;
+               }
+       }
+
+       mutex_unlock(&ldev->error_task_list_lock);
+       return reader.read;
+}
+
+static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t off, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct lima_device *ldev = dev_get_drvdata(dev);
+       struct lima_sched_error_task *et, *tmp;
+
+       mutex_lock(&ldev->error_task_list_lock);
+
+       list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+               list_del(&et->list);
+               kvfree(et);
+       }
+
+       ldev->dump.size = 0;
+       ldev->dump.num_tasks = 0;
+
+       mutex_unlock(&ldev->error_task_list_lock);
+
+       return count;
+}
+
+static const struct bin_attribute lima_error_state_attr = {
+       .attr.name = "error",
+       .attr.mode = 0600,
+       .size = 0,
+       .read = lima_error_state_read,
+       .write = lima_error_state_write,
+};
+
 static int lima_pdev_probe(struct platform_device *pdev)
 {
        struct lima_device *ldev;
@@ -288,7 +381,6 @@ static int lima_pdev_probe(struct platform_device *pdev)
                goto err_out0;
        }
 
-       ldev->pdev = pdev;
        ldev->dev = &pdev->dev;
        ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
 
@@ -306,16 +398,34 @@ static int lima_pdev_probe(struct platform_device *pdev)
        if (err)
                goto err_out1;
 
+       err = lima_devfreq_init(ldev);
+       if (err) {
+               dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+               goto err_out2;
+       }
+
+       pm_runtime_set_active(ldev->dev);
+       pm_runtime_mark_last_busy(ldev->dev);
+       pm_runtime_set_autosuspend_delay(ldev->dev, 200);
+       pm_runtime_use_autosuspend(ldev->dev);
+       pm_runtime_enable(ldev->dev);
+
        /*
         * Register the DRM device with the core and the connectors with
         * sysfs.
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
-               goto err_out2;
+               goto err_out3;
+
+       if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
+               dev_warn(ldev->dev, "fail to create error state sysfs\n");
 
        return 0;
 
+err_out3:
+       pm_runtime_disable(ldev->dev);
+       lima_devfreq_fini(ldev);
 err_out2:
        lima_device_fini(ldev);
 err_out1:
@@ -330,8 +440,17 @@ static int lima_pdev_remove(struct platform_device *pdev)
        struct lima_device *ldev = platform_get_drvdata(pdev);
        struct drm_device *ddev = ldev->ddev;
 
+       sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);
+
        drm_dev_unregister(ddev);
+
+       /* stop autosuspend to make sure device is in active state */
+       pm_runtime_set_autosuspend_delay(ldev->dev, -1);
+       pm_runtime_disable(ldev->dev);
+
+       lima_devfreq_fini(ldev);
        lima_device_fini(ldev);
+
        drm_dev_put(ddev);
        lima_sched_slab_fini();
        return 0;
@@ -344,26 +463,22 @@ static const struct of_device_id dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, dt_match);
 
+static const struct dev_pm_ops lima_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
+};
+
 static struct platform_driver lima_platform_driver = {
        .probe      = lima_pdev_probe,
        .remove     = lima_pdev_remove,
        .driver     = {
                .name   = "lima",
+               .pm     = &lima_pm_ops,
                .of_match_table = dt_match,
        },
 };
 
-static int __init lima_init(void)
-{
-       return platform_driver_register(&lima_platform_driver);
-}
-module_init(lima_init);
-
-static void __exit lima_exit(void)
-{
-       platform_driver_unregister(&lima_platform_driver);
-}
-module_exit(lima_exit);
+module_platform_driver(lima_platform_driver);
 
 MODULE_AUTHOR("Lima Project Developers");
 MODULE_DESCRIPTION("Lima DRM Driver");
index f492ecc6a5d9e127bb5b752d8b5ab3f33b455e6b..fdbd4077c768ded9fbb56114558d5aded58b898a 100644 (file)
@@ -10,6 +10,7 @@
 
 extern int lima_sched_timeout_ms;
 extern uint lima_heap_init_nr_pages;
+extern uint lima_max_error_tasks;
 
 struct lima_vm;
 struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h
new file mode 100644 (file)
index 0000000..ca243d9
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DUMP_H__
+#define __LIMA_DUMP_H__
+
+#include <linux/types.h>
+
+/**
+ * Dump file format: all the information needed to start (replay) a lima task
+ *
+ * top level format
+ * | magic code "LIMA" | format version | num tasks | data size |
+ * | reserved | reserved | reserved | reserved |
+ * | task 1 ID | task 1 size | num chunks | reserved | task 1 data |
+ * | task 2 ID | task 2 size | num chunks | reserved | task 2 data |
+ * ...
+ *
+ * task data format
+ * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data |
+ * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data |
+ * ...
+ *
+ */
+
+#define LIMA_DUMP_MAJOR 1
+#define LIMA_DUMP_MINOR 0
+
+#define LIMA_DUMP_MAGIC 0x414d494c
+
+struct lima_dump_head {
+       __u32 magic;
+       __u16 version_major;
+       __u16 version_minor;
+       __u32 num_tasks;
+       __u32 size;
+       __u32 reserved[4];
+};
+
+#define LIMA_DUMP_TASK_GP   0
+#define LIMA_DUMP_TASK_PP   1
+#define LIMA_DUMP_TASK_NUM  2
+
+struct lima_dump_task {
+       __u32 id;
+       __u32 size;
+       __u32 num_chunks;
+       __u32 reserved;
+};
+
+#define LIMA_DUMP_CHUNK_FRAME         0
+#define LIMA_DUMP_CHUNK_BUFFER        1
+#define LIMA_DUMP_CHUNK_PROCESS_NAME  2
+#define LIMA_DUMP_CHUNK_PROCESS_ID    3
+#define LIMA_DUMP_CHUNK_NUM           4
+
+struct lima_dump_chunk {
+       __u32 id;
+       __u32 size;
+       __u32 reserved[2];
+};
+
+struct lima_dump_chunk_buffer {
+       __u32 id;
+       __u32 size;
+       __u32 va;
+       __u32 reserved;
+};
+
+struct lima_dump_chunk_pid {
+       __u32 id;
+       __u32 size;
+       __u32 pid;
+       __u32 reserved;
+};
+
+#endif
index d8841c870d906ab6dcc8269b23a8f13ddb14ee67..8dd501b7a3d0d8bd3656429c2508e0f937f66e04 100644 (file)
@@ -274,6 +274,23 @@ static void lima_gp_print_version(struct lima_ip *ip)
 static struct kmem_cache *lima_gp_task_slab;
 static int lima_gp_task_slab_refcnt;
 
+static int lima_gp_hw_init(struct lima_ip *ip)
+{
+       ip->data.async_reset = false;
+       lima_gp_soft_reset_async(ip);
+       return lima_gp_soft_reset_async_wait(ip);
+}
+
+int lima_gp_resume(struct lima_ip *ip)
+{
+       return lima_gp_hw_init(ip);
+}
+
+void lima_gp_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_gp_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
@@ -281,9 +298,7 @@ int lima_gp_init(struct lima_ip *ip)
 
        lima_gp_print_version(ip);
 
-       ip->data.async_reset = false;
-       lima_gp_soft_reset_async(ip);
-       err = lima_gp_soft_reset_async_wait(ip);
+       err = lima_gp_hw_init(ip);
        if (err)
                return err;
 
index 516e5c1babbb45643bbfd243391fae3b2ab24d0b..02ec9af78a518e8fad9242b754c446d62bb6549f 100644 (file)
@@ -7,6 +7,8 @@
 struct lima_ip;
 struct lima_device;
 
+int lima_gp_resume(struct lima_ip *ip);
+void lima_gp_suspend(struct lima_ip *ip);
 int lima_gp_init(struct lima_ip *ip);
 void lima_gp_fini(struct lima_ip *ip);
 
index 6873a7af5a5ce7a6690079d1900e94c43a7fa01a..c4080a02957ba808307d31190babae70ed116aa1 100644 (file)
@@ -38,9 +38,35 @@ int lima_l2_cache_flush(struct lima_ip *ip)
        return ret;
 }
 
+static int lima_l2_cache_hw_init(struct lima_ip *ip)
+{
+       int err;
+
+       err = lima_l2_cache_flush(ip);
+       if (err)
+               return err;
+
+       l2_cache_write(LIMA_L2_CACHE_ENABLE,
+                      LIMA_L2_CACHE_ENABLE_ACCESS |
+                      LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+       l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+       return 0;
+}
+
+int lima_l2_cache_resume(struct lima_ip *ip)
+{
+       return lima_l2_cache_hw_init(ip);
+}
+
+void lima_l2_cache_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_l2_cache_init(struct lima_ip *ip)
 {
-       int i, err;
+       int i;
        u32 size;
        struct lima_device *dev = ip->dev;
 
@@ -63,15 +89,7 @@ int lima_l2_cache_init(struct lima_ip *ip)
                 1 << (size & 0xff),
                 1 << ((size >> 24) & 0xff));
 
-       err = lima_l2_cache_flush(ip);
-       if (err)
-               return err;
-
-       l2_cache_write(LIMA_L2_CACHE_ENABLE,
-                      LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
-       l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
-
-       return 0;
+       return lima_l2_cache_hw_init(ip);
 }
 
 void lima_l2_cache_fini(struct lima_ip *ip)
index c63fb676ff1412e9163da5a4a46bb2f961758709..1aeeefd53fb9ecd55d253e2489d506f930f02f83 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_l2_cache_resume(struct lima_ip *ip);
+void lima_l2_cache_suspend(struct lima_ip *ip);
 int lima_l2_cache_init(struct lima_ip *ip);
 void lima_l2_cache_fini(struct lima_ip *ip);
 
index f79d2af427e77cc173b6377118d5176b53f88360..a1ae6c252dc2b546bad5f12226cad197ff006808 100644 (file)
@@ -59,12 +59,44 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int lima_mmu_init(struct lima_ip *ip)
+static int lima_mmu_hw_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
        int err;
        u32 v;
 
+       mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+       err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+                                   LIMA_MMU_DTE_ADDR, v, v == 0);
+       if (err)
+               return err;
+
+       mmu_write(LIMA_MMU_INT_MASK,
+                 LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+       mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+       return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+                                    LIMA_MMU_STATUS, v,
+                                    v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+int lima_mmu_resume(struct lima_ip *ip)
+{
+       if (ip->id == lima_ip_ppmmu_bcast)
+               return 0;
+
+       return lima_mmu_hw_init(ip);
+}
+
+void lima_mmu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+       struct lima_device *dev = ip->dev;
+       int err;
+
        if (ip->id == lima_ip_ppmmu_bcast)
                return 0;
 
@@ -74,12 +106,6 @@ int lima_mmu_init(struct lima_ip *ip)
                return -EIO;
        }
 
-       mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
-       err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
-                                   LIMA_MMU_DTE_ADDR, v, v == 0);
-       if (err)
-               return err;
-
        err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
                               IRQF_SHARED, lima_ip_name(ip), ip);
        if (err) {
@@ -87,11 +113,7 @@ int lima_mmu_init(struct lima_ip *ip)
                return err;
        }
 
-       mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
-       mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
-       return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
-                                    LIMA_MMU_STATUS, v,
-                                    v & LIMA_MMU_STATUS_PAGING_ENABLED);
+       return lima_mmu_hw_init(ip);
 }
 
 void lima_mmu_fini(struct lima_ip *ip)
@@ -113,8 +135,7 @@ void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
                              LIMA_MMU_STATUS, v,
                              v & LIMA_MMU_STATUS_STALL_ACTIVE);
 
-       if (vm)
-               mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+       mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
 
        /* flush the TLB */
        mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
index 4f8ccbebcba147ff023f5f942c7c451caf0658dd..f0c97ac75ea0100b31c81953321a3d99e873800a 100644 (file)
@@ -7,6 +7,8 @@
 struct lima_ip;
 struct lima_vm;
 
+int lima_mmu_resume(struct lima_ip *ip);
+void lima_mmu_suspend(struct lima_ip *ip);
 int lima_mmu_init(struct lima_ip *ip);
 void lima_mmu_fini(struct lima_ip *ip);
 
index 571f6d66158182a369d850da4dfac6a3801407fe..e397e1146e96314808b4964576fc419988b11053 100644 (file)
@@ -21,7 +21,7 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
                                 v, v & LIMA_PMU_INT_CMD_MASK,
                                 100, 100000);
        if (err) {
-               dev_err(dev->dev, "timeout wait pmd cmd\n");
+               dev_err(dev->dev, "timeout wait pmu cmd\n");
                return err;
        }
 
@@ -29,7 +29,41 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
        return 0;
 }
 
-int lima_pmu_init(struct lima_ip *ip)
+static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
+{
+       struct lima_device *dev = ip->dev;
+       u32 ret = 0;
+       int i;
+
+       ret |= LIMA_PMU_POWER_GP0_MASK;
+
+       if (dev->id == lima_gpu_mali400) {
+               ret |= LIMA_PMU_POWER_L2_MASK;
+               for (i = 0; i < 4; i++) {
+                       if (dev->ip[lima_ip_pp0 + i].present)
+                               ret |= LIMA_PMU_POWER_PP_MASK(i);
+               }
+       } else {
+               if (dev->ip[lima_ip_pp0].present)
+                       ret |= LIMA450_PMU_POWER_PP0_MASK;
+               for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
+                       if (dev->ip[i].present) {
+                               ret |= LIMA450_PMU_POWER_PP13_MASK;
+                               break;
+                       }
+               }
+               for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+                       if (dev->ip[i].present) {
+                               ret |= LIMA450_PMU_POWER_PP47_MASK;
+                               break;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int lima_pmu_hw_init(struct lima_ip *ip)
 {
        int err;
        u32 stat;
@@ -54,7 +88,44 @@ int lima_pmu_init(struct lima_ip *ip)
        return 0;
 }
 
-void lima_pmu_fini(struct lima_ip *ip)
+static void lima_pmu_hw_fini(struct lima_ip *ip)
 {
+       u32 stat;
+
+       if (!ip->data.mask)
+               ip->data.mask = lima_pmu_get_ip_mask(ip);
 
+       stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
+       if (stat) {
+               pmu_write(LIMA_PMU_POWER_DOWN, stat);
+
+               /* Don't wait for interrupt on Mali400 if all domains are
+                * powered off because the HW won't generate an interrupt
+                * in this case.
+                */
+               if (ip->dev->id == lima_gpu_mali400)
+                       pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+               else
+                       lima_pmu_wait_cmd(ip);
+       }
+}
+
+int lima_pmu_resume(struct lima_ip *ip)
+{
+       return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_suspend(struct lima_ip *ip)
+{
+       lima_pmu_hw_fini(ip);
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+       return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+       lima_pmu_hw_fini(ip);
 }
index a2a18775eb07ddcf833723f95d0443fccfa0e1a1..652dc7af30473fff55c8b709083f6bf13907b996 100644 (file)
@@ -6,6 +6,8 @@
 
 struct lima_ip;
 
+int lima_pmu_resume(struct lima_ip *ip);
+void lima_pmu_suspend(struct lima_ip *ip);
 int lima_pmu_init(struct lima_ip *ip);
 void lima_pmu_fini(struct lima_ip *ip);
 
index 8fef224b93c8568fc70bd483739a7cd84d5e49e6..33f01383409c0e80f8f0fd8805ce06f35cb02a77 100644 (file)
@@ -223,6 +223,23 @@ static void lima_pp_print_version(struct lima_ip *ip)
                 lima_ip_name(ip), name, major, minor);
 }
 
+static int lima_pp_hw_init(struct lima_ip *ip)
+{
+       ip->data.async_reset = false;
+       lima_pp_soft_reset_async(ip);
+       return lima_pp_soft_reset_async_wait(ip);
+}
+
+int lima_pp_resume(struct lima_ip *ip)
+{
+       return lima_pp_hw_init(ip);
+}
+
+void lima_pp_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_pp_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
@@ -230,9 +247,7 @@ int lima_pp_init(struct lima_ip *ip)
 
        lima_pp_print_version(ip);
 
-       ip->data.async_reset = false;
-       lima_pp_soft_reset_async(ip);
-       err = lima_pp_soft_reset_async_wait(ip);
+       err = lima_pp_hw_init(ip);
        if (err)
                return err;
 
@@ -254,6 +269,16 @@ void lima_pp_fini(struct lima_ip *ip)
 
 }
 
+int lima_pp_bcast_resume(struct lima_ip *ip)
+{
+       return 0;
+}
+
+void lima_pp_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
 int lima_pp_bcast_init(struct lima_ip *ip)
 {
        struct lima_device *dev = ip->dev;
index bf60c77b26338f1f181d1fa95437111fe9476f18..16ec96de15a982d6c4f5b4343f6d4a4fcb4a3efe 100644 (file)
@@ -7,9 +7,13 @@
 struct lima_ip;
 struct lima_device;
 
+int lima_pp_resume(struct lima_ip *ip);
+void lima_pp_suspend(struct lima_ip *ip);
 int lima_pp_init(struct lima_ip *ip);
 void lima_pp_fini(struct lima_ip *ip);
 
+int lima_pp_bcast_resume(struct lima_ip *ip);
+void lima_pp_bcast_suspend(struct lima_ip *ip);
 int lima_pp_bcast_init(struct lima_ip *ip);
 void lima_pp_bcast_fini(struct lima_ip *ip);
 
index 3886999b453303a58b5b2935d92f30f10bab4ee7..e6cefda0027959d169d059ecb99d959945792a17 100644 (file)
@@ -3,14 +3,17 @@
 
 #include <linux/kthread.h>
 #include <linux/slab.h>
-#include <linux/xarray.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
 
+#include "lima_devfreq.h"
 #include "lima_drv.h"
 #include "lima_sched.h"
 #include "lima_vm.h"
 #include "lima_mmu.h"
 #include "lima_l2_cache.h"
 #include "lima_gem.h"
+#include "lima_trace.h"
 
 struct lima_fence {
        struct dma_fence base;
@@ -176,6 +179,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte
 {
        struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
 
+       trace_lima_task_submit(task);
        drm_sched_entity_push_job(&task->base, &context->base);
        return fence;
 }
@@ -191,14 +195,36 @@ static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
        return NULL;
 }
 
+static int lima_pm_busy(struct lima_device *ldev)
+{
+       int ret;
+
+       /* resume GPU if it has been suspended by runtime PM */
+       ret = pm_runtime_get_sync(ldev->dev);
+       if (ret < 0)
+               return ret;
+
+       lima_devfreq_record_busy(&ldev->devfreq);
+       return 0;
+}
+
+static void lima_pm_idle(struct lima_device *ldev)
+{
+       lima_devfreq_record_idle(&ldev->devfreq);
+
+       /* GPU can do auto runtime suspend */
+       pm_runtime_mark_last_busy(ldev->dev);
+       pm_runtime_put_autosuspend(ldev->dev);
+}
+
 static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
 {
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+       struct lima_device *ldev = pipe->ldev;
        struct lima_fence *fence;
        struct dma_fence *ret;
-       struct lima_vm *vm = NULL, *last_vm = NULL;
-       int i;
+       int i, err;
 
        /* after GPU reset */
        if (job->s_fence->finished.error < 0)
@@ -207,6 +233,13 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        fence = lima_fence_create(pipe);
        if (!fence)
                return NULL;
+
+       err = lima_pm_busy(ldev);
+       if (err < 0) {
+               dma_fence_put(&fence->base);
+               return NULL;
+       }
+
        task->fence = &fence->base;
 
        /* for caller usage of the fence, otherwise irq handler
@@ -234,21 +267,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);
 
-       if (task->vm != pipe->current_vm) {
-               vm = lima_vm_get(task->vm);
-               last_vm = pipe->current_vm;
-               pipe->current_vm = task->vm;
-       }
+       lima_vm_put(pipe->current_vm);
+       pipe->current_vm = lima_vm_get(task->vm);
 
        if (pipe->bcast_mmu)
-               lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+               lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
        else {
                for (i = 0; i < pipe->num_mmu; i++)
-                       lima_mmu_switch_vm(pipe->mmu[i], vm);
+                       lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
        }
 
-       if (last_vm)
-               lima_vm_put(last_vm);
+       trace_lima_task_run(task);
 
        pipe->error = false;
        pipe->task_run(pipe, task);
@@ -256,10 +285,139 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
        return task->fence;
 }
 
+static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+{
+       struct lima_sched_error_task *et;
+       struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
+       struct lima_ip *ip = pipe->processor[0];
+       int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
+       struct lima_device *dev = ip->dev;
+       struct lima_sched_context *sched_ctx =
+               container_of(task->base.entity,
+                            struct lima_sched_context, base);
+       struct lima_ctx *ctx =
+               container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
+       struct lima_dump_task *dt;
+       struct lima_dump_chunk *chunk;
+       struct lima_dump_chunk_pid *pid_chunk;
+       struct lima_dump_chunk_buffer *buffer_chunk;
+       u32 size, task_size, mem_size;
+       int i;
+
+       mutex_lock(&dev->error_task_list_lock);
+
+       if (dev->dump.num_tasks >= lima_max_error_tasks) {
+               dev_info(dev->dev, "fail to save task state from %s pid %d: "
+                        "error task list is full\n", ctx->pname, ctx->pid);
+               goto out;
+       }
+
+       /* frame chunk */
+       size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
+       /* process name chunk */
+       size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
+       /* pid chunk */
+       size += sizeof(struct lima_dump_chunk);
+       /* buffer chunks */
+       for (i = 0; i < task->num_bos; i++) {
+               struct lima_bo *bo = task->bos[i];
+
+               size += sizeof(struct lima_dump_chunk);
+               size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
+       }
+
+       task_size = size + sizeof(struct lima_dump_task);
+       mem_size = task_size + sizeof(*et);
+       et = kvmalloc(mem_size, GFP_KERNEL);
+       if (!et) {
+               dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
+                       mem_size);
+               goto out;
+       }
+
+       et->data = et + 1;
+       et->size = task_size;
+
+       dt = et->data;
+       memset(dt, 0, sizeof(*dt));
+       dt->id = pipe_id;
+       dt->size = size;
+
+       chunk = (struct lima_dump_chunk *)(dt + 1);
+       memset(chunk, 0, sizeof(*chunk));
+       chunk->id = LIMA_DUMP_CHUNK_FRAME;
+       chunk->size = pipe->frame_size;
+       memcpy(chunk + 1, task->frame, pipe->frame_size);
+       dt->num_chunks++;
+
+       chunk = (void *)(chunk + 1) + chunk->size;
+       memset(chunk, 0, sizeof(*chunk));
+       chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
+       chunk->size = sizeof(ctx->pname);
+       memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
+       dt->num_chunks++;
+
+       pid_chunk = (void *)(chunk + 1) + chunk->size;
+       memset(pid_chunk, 0, sizeof(*pid_chunk));
+       pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
+       pid_chunk->pid = ctx->pid;
+       dt->num_chunks++;
+
+       buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
+       for (i = 0; i < task->num_bos; i++) {
+               struct lima_bo *bo = task->bos[i];
+               void *data;
+
+               memset(buffer_chunk, 0, sizeof(*buffer_chunk));
+               buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
+               buffer_chunk->va = lima_vm_get_va(task->vm, bo);
+
+               if (bo->heap_size) {
+                       buffer_chunk->size = bo->heap_size;
+
+                       data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
+                                   VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+                       if (!data) {
+                               kvfree(et);
+                               goto out;
+                       }
+
+                       memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+                       vunmap(data);
+               } else {
+                       buffer_chunk->size = lima_bo_size(bo);
+
+                       data = drm_gem_shmem_vmap(&bo->base.base);
+                       if (IS_ERR_OR_NULL(data)) {
+                               kvfree(et);
+                               goto out;
+                       }
+
+                       memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+                       drm_gem_shmem_vunmap(&bo->base.base, data);
+               }
+
+               buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+               dt->num_chunks++;
+       }
+
+       list_add(&et->list, &dev->error_task_list);
+       dev->dump.size += et->size;
+       dev->dump.num_tasks++;
+
+       dev_info(dev->dev, "save error task state success\n");
+
+out:
+       mutex_unlock(&dev->error_task_list_lock);
+}
+
 static void lima_sched_timedout_job(struct drm_sched_job *job)
 {
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_sched_task *task = to_lima_task(job);
+       struct lima_device *ldev = pipe->ldev;
 
        if (!pipe->error)
                DRM_ERROR("lima job timeout\n");
@@ -268,6 +426,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
 
        drm_sched_increase_karma(&task->base);
 
+       lima_sched_build_error_task_list(task);
+
        pipe->task_error(pipe);
 
        if (pipe->bcast_mmu)
@@ -279,12 +439,12 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
                        lima_mmu_page_fault_resume(pipe->mmu[i]);
        }
 
-       if (pipe->current_vm)
-               lima_vm_put(pipe->current_vm);
-
+       lima_vm_put(pipe->current_vm);
        pipe->current_vm = NULL;
        pipe->current_task = NULL;
 
+       lima_pm_idle(ldev);
+
        drm_sched_resubmit_jobs(&pipe->base);
        drm_sched_start(&pipe->base, true);
 }
@@ -355,6 +515,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
 {
        struct lima_sched_task *task = pipe->current_task;
+       struct lima_device *ldev = pipe->ldev;
 
        if (pipe->error) {
                if (task && task->recoverable)
@@ -364,5 +525,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
        } else {
                pipe->task_fini(pipe);
                dma_fence_signal(task->fence);
+
+               lima_pm_idle(ldev);
        }
 }
index d64393fb50a9b667ab05eca7fb4ec3712daf5824..90f03c48ef4a89323cb0f06f0ab579f20b477ef2 100644 (file)
@@ -5,9 +5,18 @@
 #define __LIMA_SCHED_H__
 
 #include <drm/gpu_scheduler.h>
+#include <linux/list.h>
+#include <linux/xarray.h>
 
+struct lima_device;
 struct lima_vm;
 
+struct lima_sched_error_task {
+       struct list_head list;
+       void *data;
+       u32 size;
+};
+
 struct lima_sched_task {
        struct drm_sched_job base;
 
@@ -44,6 +53,8 @@ struct lima_sched_pipe {
        u32 fence_seqno;
        spinlock_t fence_lock;
 
+       struct lima_device *ldev;
+
        struct lima_sched_task *current_task;
        struct lima_vm *current_vm;
 
diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c
new file mode 100644 (file)
index 0000000..ea1c728
--- /dev/null
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#include "lima_sched.h"
+
+#define CREATE_TRACE_POINTS
+#include "lima_trace.h"
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
new file mode 100644 (file)
index 0000000..3a430e9
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#if !defined(_LIMA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _LIMA_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lima
+#define TRACE_INCLUDE_FILE lima_trace
+
+DECLARE_EVENT_CLASS(lima_task,
+       TP_PROTO(struct lima_sched_task *task),
+       TP_ARGS(task),
+       TP_STRUCT__entry(
+               __field(uint64_t, task_id)
+               __field(unsigned int, context)
+               __field(unsigned int, seqno)
+               __string(pipe, task->base.sched->name)
+               ),
+
+       TP_fast_assign(
+               __entry->task_id = task->base.id;
+               __entry->context = task->base.s_fence->finished.context;
+               __entry->seqno = task->base.s_fence->finished.seqno;
+               __assign_str(pipe, task->base.sched->name)
+               ),
+
+       TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
+                 __entry->task_id, __entry->context, __entry->seqno,
+                 __get_str(pipe))
+);
+
+DEFINE_EVENT(lima_task, lima_task_submit,
+            TP_PROTO(struct lima_sched_task *task),
+            TP_ARGS(task)
+);
+
+DEFINE_EVENT(lima_task, lima_task_run,
+            TP_PROTO(struct lima_sched_task *task),
+            TP_ARGS(task)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima
+#include <trace/define_trace.h>
index 22aeec77d84d3e1c0c11d51c3391d1019a64fffd..3a7c74822d8b2b0e584a3777792a0c860c535b21 100644 (file)
@@ -54,7 +54,8 @@ static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
 
 static inline void lima_vm_put(struct lima_vm *vm)
 {
-       kref_put(&vm->refcount, lima_vm_release);
+       if (vm)
+               kref_put(&vm->refcount, lima_vm_release);
 }
 
 void lima_vm_print(struct lima_vm *vm);
index e59907e688541a2ff5dfc6904e2a9980908d376f..04e1d38d41f79968f30e8a1298dd41211f871e25 100644 (file)
@@ -948,7 +948,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct drm_pending_vblank_event *event;
 
        drm_crtc_vblank_off(crtc);
@@ -1020,7 +1020,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct drm_pending_vblank_event *event = crtc->state->event;
        struct drm_plane *plane = &pipe->plane;
        struct drm_plane_state *pstate = plane->state;
@@ -1078,7 +1078,7 @@ static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        u32 val;
 
        /* Enable all VBLANK IRQs */
@@ -1097,7 +1097,7 @@ static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
 
        /* Disable all VBLANK IRQs */
        writel(0, mcde->regs + MCDE_IMSCPP);
@@ -1117,7 +1117,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
 
 int mcde_display_init(struct drm_device *drm)
 {
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        int ret;
        static const u32 formats[] = {
                DRM_FORMAT_ARGB8888,
index 80edd6628979c0d4d9224a6e145a2a5c7d6d97fe..679c2c4e6d9de63ca9aff17d9782ded83dd36712 100644 (file)
@@ -34,6 +34,8 @@ struct mcde {
        struct regulator *vana;
 };
 
+#define to_mcde(dev) container_of(dev, struct mcde, drm)
+
 bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
 void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
 extern struct platform_driver mcde_dsi_driver;
index f28cb7a576ba40f08c8fc448ff0e69c76d2bbd81..84f3e2dbd77bde90f94a5b394e482336685a093c 100644 (file)
@@ -72,6 +72,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_panel.h>
@@ -163,7 +164,7 @@ static irqreturn_t mcde_irq(int irq, void *data)
 static int mcde_modeset_init(struct drm_device *drm)
 {
        struct drm_mode_config *mode_config;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        int ret;
 
        if (!mcde->bridge) {
@@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm)
        ret = drm_vblank_init(drm, 1);
        if (ret) {
                dev_err(drm->dev, "failed to init vblank\n");
-               goto out_config;
+               return ret;
        }
 
        ret = mcde_display_init(drm);
        if (ret) {
                dev_err(drm->dev, "failed to init display\n");
-               goto out_config;
+               return ret;
        }
 
        /*
@@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm)
                                                    mcde->bridge);
        if (ret) {
                dev_err(drm->dev, "failed to attach display output bridge\n");
-               goto out_config;
+               return ret;
        }
 
        drm_mode_config_reset(drm);
@@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm)
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
-
-out_config:
-       drm_mode_config_cleanup(drm);
-       return ret;
-}
-
-static void mcde_release(struct drm_device *drm)
-{
-       struct mcde *mcde = drm->dev_private;
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(mcde);
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
@@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
 static struct drm_driver mcde_drm_driver = {
        .driver_features =
                DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
-       .release = mcde_release,
        .lastclose = drm_fb_helper_lastclose,
        .ioctls = NULL,
        .fops = &drm_fops,
@@ -259,7 +246,9 @@ static int mcde_drm_bind(struct device *dev)
        struct drm_device *drm = dev_get_drvdata(dev);
        int ret;
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
 
        ret = component_bind_all(drm->dev, drm);
        if (ret) {
@@ -318,35 +307,27 @@ static int mcde_probe(struct platform_device *pdev)
        int ret;
        int i;
 
-       mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
-       if (!mcde)
-               return -ENOMEM;
-       mcde->dev = dev;
-
-       ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
-       if (ret) {
-               kfree(mcde);
-               return ret;
-       }
+       mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
+       if (IS_ERR(mcde))
+               return PTR_ERR(mcde);
        drm = &mcde->drm;
-       drm->dev_private = mcde;
+       mcde->dev = dev;
        platform_set_drvdata(pdev, drm);
 
        /* Enable continuous updates: this is what Linux' framebuffer expects */
        mcde->oneshot_mode = false;
-       drm->dev_private = mcde;
 
        /* First obtain and turn on the main power */
        mcde->epod = devm_regulator_get(dev, "epod");
        if (IS_ERR(mcde->epod)) {
                ret = PTR_ERR(mcde->epod);
                dev_err(dev, "can't get EPOD regulator\n");
-               goto dev_unref;
+               return ret;
        }
        ret = regulator_enable(mcde->epod);
        if (ret) {
                dev_err(dev, "can't enable EPOD regulator\n");
-               goto dev_unref;
+               return ret;
        }
        mcde->vana = devm_regulator_get(dev, "vana");
        if (IS_ERR(mcde->vana)) {
@@ -497,8 +478,6 @@ regulator_off:
        regulator_disable(mcde->vana);
 regulator_epod_off:
        regulator_disable(mcde->epod);
-dev_unref:
-       drm_dev_put(drm);
        return ret;
 
 }
@@ -506,13 +485,12 @@ dev_unref:
 static int mcde_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
 
        component_master_del(&pdev->dev, &mcde_drm_comp_ops);
        clk_disable_unprepare(mcde->mcde_clk);
        regulator_disable(mcde->vana);
        regulator_disable(mcde->epod);
-       drm_dev_put(drm);
 
        return 0;
 }
index 7af5ebb0c43689776f13b37a2f41a5f6a972dc35..f303369305a3cda768b183034bffdeaf7cc61f5d 100644 (file)
@@ -1020,7 +1020,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
                         void *data)
 {
        struct drm_device *drm = data;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        struct mcde_dsi *d = dev_get_drvdata(dev);
        struct device_node *child;
        struct drm_panel *panel = NULL;
@@ -1073,10 +1073,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
                        panel = NULL;
 
                        bridge = of_drm_find_bridge(child);
-                       if (IS_ERR(bridge)) {
-                               dev_err(dev, "failed to find bridge (%ld)\n",
-                                       PTR_ERR(bridge));
-                               return PTR_ERR(bridge);
+                       if (!bridge) {
+                               dev_err(dev, "failed to find bridge\n");
+                               return -EINVAL;
                        }
                }
        }
index 4f0ce4cd5b8ca3d21b139a0f8550ec0401e2e338..945c3ac92998483b77413efb6c51d3d9f9249d1c 100644 (file)
@@ -10,7 +10,9 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
 
@@ -20,6 +22,7 @@
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "mtk_dpi_regs.h"
 #include "mtk_drm_ddp_comp.h"
@@ -74,6 +77,9 @@ struct mtk_dpi {
        enum mtk_dpi_out_yc_map yc_map;
        enum mtk_dpi_out_bit_num bit_num;
        enum mtk_dpi_out_channel_swap channel_swap;
+       struct pinctrl *pinctrl;
+       struct pinctrl_state *pins_gpio;
+       struct pinctrl_state *pins_dpi;
        int refcount;
 };
 
@@ -379,6 +385,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
        if (--dpi->refcount != 0)
                return;
 
+       if (dpi->pinctrl && dpi->pins_gpio)
+               pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
        mtk_dpi_disable(dpi);
        clk_disable_unprepare(dpi->pixel_clk);
        clk_disable_unprepare(dpi->engine_clk);
@@ -403,6 +412,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
                goto err_pixel;
        }
 
+       if (dpi->pinctrl && dpi->pins_dpi)
+               pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
        mtk_dpi_enable(dpi);
        return 0;
 
@@ -509,15 +521,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        return 0;
 }
 
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
-       .destroy = mtk_dpi_encoder_destroy,
-};
-
 static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
@@ -596,8 +599,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                dev_err(dev, "Failed to initialize decoder: %d\n", ret);
                goto err_unregister;
@@ -705,6 +708,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
        dpi->dev = dev;
        dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
 
+       dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (IS_ERR(dpi->pinctrl)) {
+               dpi->pinctrl = NULL;
+               dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
+       }
+       if (dpi->pinctrl) {
+               dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
+               if (IS_ERR(dpi->pins_gpio)) {
+                       dpi->pins_gpio = NULL;
+                       dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
+               }
+               if (dpi->pins_gpio)
+                       pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
+               dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
+               if (IS_ERR(dpi->pins_dpi)) {
+                       dpi->pins_dpi = NULL;
+                       dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
+               }
+       }
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dpi->regs = devm_ioremap_resource(dev, mem);
        if (IS_ERR(dpi->regs)) {
index 0563c6813333ecf70fd6264065bf79cbae801b24..ce570283b55f7a140429d88e6b0e8f136ed6540f 100644 (file)
@@ -162,7 +162,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        }
        private->mutex_dev = &pdev->dev;
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               return ret;
 
        drm->mode_config.min_width = 64;
        drm->mode_config.min_height = 64;
@@ -179,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 
        ret = component_bind_all(drm->dev, drm);
        if (ret)
-               goto err_config_cleanup;
+               return ret;
 
        /*
         * We currently support two fixed data streams, each optional,
@@ -255,8 +257,6 @@ err_unset_dma_parms:
                dma_dev->dma_parms = NULL;
 err_component_unbind:
        component_unbind_all(drm->dev, drm);
-err_config_cleanup:
-       drm_mode_config_cleanup(drm);
 
        return ret;
 }
@@ -272,7 +272,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
                private->dma_dev->dma_parms = NULL;
 
        component_unbind_all(drm->dev, drm);
-       drm_mode_config_cleanup(drm);
 }
 
 static const struct file_operations mtk_drm_fops = {
@@ -348,9 +347,7 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
-       ret = drm_fbdev_generic_setup(drm, 32);
-       if (ret)
-               DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
index b04a3c2b111e09f7dcebb6bbc628a0f8bf50915a..f8fd8b98c30e3d5b6a72930c51bc43090dfe82b3 100644 (file)
@@ -224,6 +224,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 
        expected = sg_dma_address(sg->sgl);
        for_each_sg(sg->sgl, s, sg->nents, i) {
+               if (!sg_dma_len(s))
+                       break;
+
                if (sg_dma_address(s) != expected) {
                        DRM_ERROR("sg_table is not contiguous");
                        ret = -EINVAL;
index 0ede69830a9dd0c170de57c263f9b89b722fb1f1..a9a25087112fd4fa9813748c27f19ebad32b4967 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "mtk_drm_ddp_comp.h"
 
@@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
        dsi->enabled = false;
 }
 
-static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
-       .destroy = mtk_dsi_encoder_destroy,
-};
-
 static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
@@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
 {
        int ret;
 
-       ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("Failed to encoder init to drm\n");
                return ret;
index ff43a3d80410f2989b38e05a08ad34d3a3d12726..7bc086ec74f7fe3688ba2dfff1a2417f3161a8eb 100644 (file)
@@ -311,14 +311,10 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
        u8 checksum;
        int ctrl_frame_en = 0;
 
-       frame_type = *buffer;
-       buffer += 1;
-       frame_ver = *buffer;
-       buffer += 1;
-       frame_len = *buffer;
-       buffer += 1;
-       checksum = *buffer;
-       buffer += 1;
+       frame_type = *buffer++;
+       frame_ver = *buffer++;
+       frame_len = *buffer++;
+       checksum = *buffer++;
        frame_data = buffer;
 
        dev_dbg(hdmi->dev,
@@ -982,7 +978,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
                                        struct drm_display_mode *mode)
 {
        struct hdmi_avi_infoframe frame;
-       u8 buffer[17];
+       u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        ssize_t err;
 
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
@@ -1008,7 +1004,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
                                        const char *product)
 {
        struct hdmi_spd_infoframe frame;
-       u8 buffer[29];
+       u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
        ssize_t err;
 
        err = hdmi_spd_infoframe_init(&frame, vendor, product);
@@ -1031,7 +1027,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
 static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
 {
        struct hdmi_audio_infoframe frame;
-       u8 buffer[14];
+       u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
        ssize_t err;
 
        err = hdmi_audio_infoframe_init(&frame);
index e4d34484ecc82bd2d3874f8fb2219c33c4c768e3..8cee2591e7284ba06e4a02475eba09331e47e1a2 100644 (file)
@@ -88,6 +88,44 @@ static const struct phy_ops mtk_mipi_tx_ops = {
        .owner = THIS_MODULE,
 };
 
+static void mtk_mipi_tx_get_calibration_datal(struct mtk_mipi_tx *mipi_tx)
+{
+       struct nvmem_cell *cell;
+       size_t len;
+       u32 *buf;
+
+       cell = nvmem_cell_get(mipi_tx->dev, "calibration-data");
+       if (IS_ERR(cell)) {
+               dev_info(mipi_tx->dev, "can't get nvmem_cell_get, ignore it\n");
+               return;
+       }
+       buf = (u32 *)nvmem_cell_read(cell, &len);
+       nvmem_cell_put(cell);
+
+       if (IS_ERR(buf)) {
+               dev_info(mipi_tx->dev, "can't get data, ignore it\n");
+               return;
+       }
+
+       if (len < 3 * sizeof(u32)) {
+               dev_info(mipi_tx->dev, "invalid calibration data\n");
+               kfree(buf);
+               return;
+       }
+
+       mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) |
+                              (buf[0] >> 11 & 0x1f);
+       mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) |
+                              (buf[0] >> 1 & 0x1f);
+       mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) |
+                              (buf[1] >> 22 & 0x1f);
+       mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) |
+                              (buf[1] >> 12 & 0x1f);
+       mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) |
+                              (buf[1] >> 2 & 0x1f);
+       kfree(buf);
+}
+
 static int mtk_mipi_tx_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -125,6 +163,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
+                                  &mipi_tx->mipitx_drive);
+       /* If can't get the "mipi_tx->mipitx_drive", set it default 0x8 */
+       if (ret < 0)
+               mipi_tx->mipitx_drive = 4600;
+
+       /* check the mipitx_drive valid */
+       if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) {
+               dev_warn(dev, "drive-strength-microamp is invalid %d, not in 3000 ~ 6000\n",
+                        mipi_tx->mipitx_drive);
+               mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000,
+                                                 6000);
+       }
+
        ref_clk_name = __clk_get_name(ref_clk);
 
        ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -160,6 +212,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
 
        mipi_tx->dev = dev;
 
+       mtk_mipi_tx_get_calibration_datal(mipi_tx);
+
        return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
                                   mipi_tx->pll);
 }
index 413f35d86219b33a10d139de5c56e08844ec381e..c76f07c3fdeb10132fb9125cb3c598c50f4131fc 100644 (file)
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
+#include <linux/slab.h>
 
 struct mtk_mipitx_data {
        const u32 mppll_preserve;
@@ -27,6 +29,8 @@ struct mtk_mipi_tx {
        struct device *dev;
        void __iomem *regs;
        u32 data_rate;
+       u32 mipitx_drive;
+       u32 rt_code[5];
        const struct mtk_mipitx_data *driver_data;
        struct clk_hw pll_hw;
        struct clk *pll;
index 91f08a351fd0781e487278e60174bc4fd0f57bb5..9f3e55aeebb21bfdb97f185ba2039fe0dc4a2527 100644 (file)
@@ -17,6 +17,9 @@
 #define RG_DSI_BG_CORE_EN              BIT(7)
 #define RG_DSI_PAD_TIEL_SEL            BIT(8)
 
+#define MIPITX_VOLTAGE_SEL     0x0010
+#define RG_DSI_HSTX_LDO_REF_SEL                (0xf << 6)
+
 #define MIPITX_PLL_PWR         0x0028
 #define MIPITX_PLL_CON0                0x002c
 #define MIPITX_PLL_CON1                0x0030
@@ -25,6 +28,7 @@
 #define MIPITX_PLL_CON4                0x003c
 #define RG_DSI_PLL_IBIAS               (3 << 10)
 
+#define MIPITX_D2P_RTCODE      0x0100
 #define MIPITX_D2_SW_CTL_EN    0x0144
 #define MIPITX_D0_SW_CTL_EN    0x0244
 #define MIPITX_CK_CKMODE_EN    0x0328
@@ -105,6 +109,24 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
        .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
 };
 
+static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
+{
+       int i, j;
+
+       for (i = 0; i < 5; i++) {
+               if ((mipi_tx->rt_code[i] & 0x1f) == 0)
+                       mipi_tx->rt_code[i] |= 0x10;
+
+               if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0)
+                       mipi_tx->rt_code[i] |= 0x10 << 5;
+
+               for (j = 0; j < 10; j++)
+                       mtk_mipi_tx_update_bits(mipi_tx,
+                               MIPITX_D2P_RTCODE * (i + 1) + j * 4,
+                               1, mipi_tx->rt_code[i] >> j & 1);
+       }
+}
+
 static void mtk_mipi_tx_power_on_signal(struct phy *phy)
 {
        struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -123,6 +145,12 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
        mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
        mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
 
+       mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
+                               RG_DSI_HSTX_LDO_REF_SEL,
+                               (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+
+       mtk_mipi_tx_config_calibration_data(mipi_tx);
+
        mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
 }
 
index 8c2e1b47e81a59c582b4556ed7cde3f7c4df2452..4c5aafcec7991bec654f03145f05a60c13db51c7 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
+#include <linux/sys_soc.h>
 #include <linux/platform_device.h>
 #include <linux/soc/amlogic/meson-canvas.h>
 
@@ -183,6 +184,24 @@ static void meson_remove_framebuffers(void)
        kfree(ap);
 }
 
+struct meson_drm_soc_attr {
+       struct meson_drm_soc_limits limits;
+       const struct soc_device_attribute *attrs;
+};
+
+static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+       /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */
+       {
+               .limits = {
+                       .max_hdmi_phy_freq = 1650000,
+               },
+               .attrs = (const struct soc_device_attribute []) {
+                       { .soc_id = "GXL (S805*)", },
+                       { /* sentinel */ },
+               }
+       },
+};
+
 static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -191,7 +210,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        struct drm_device *drm;
        struct resource *res;
        void __iomem *regs;
-       int ret;
+       int ret, i;
 
        /* Checks if an output connector is available */
        if (!meson_vpu_has_available_connectors(dev)) {
@@ -281,10 +300,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        if (ret)
                goto free_drm;
 
+       /* Assign limits per soc revision/package */
+       for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+               if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
+                       priv->limits = &meson_drm_soc_attrs[i].limits;
+                       break;
+               }
+       }
+
        /* Remove early framebuffers (ie. simplefb) */
        meson_remove_framebuffers();
 
-       drm_mode_config_init(drm);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
+               goto free_drm;
        drm->mode_config.max_width = 3840;
        drm->mode_config.max_height = 2160;
        drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -379,7 +408,6 @@ static void meson_drv_unbind(struct device *dev)
        drm_dev_unregister(drm);
        drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
-       drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
 }
 
index 04fdf3826643276eff31c67d2212c8d1202d561c..5b23704a80d680c0530e9a5bdfbcbfdce98b1a97 100644 (file)
@@ -30,6 +30,10 @@ struct meson_drm_match_data {
        struct meson_afbcd_ops *afbcd_ops;
 };
 
+struct meson_drm_soc_limits {
+       unsigned int max_hdmi_phy_freq;
+};
+
 struct meson_drm {
        struct device *dev;
        enum vpu_compatible compat;
@@ -48,6 +52,8 @@ struct meson_drm {
        struct drm_plane *primary_plane;
        struct drm_plane *overlay_plane;
 
+       const struct meson_drm_soc_limits *limits;
+
        /* Components Data */
        struct {
                bool osd1_enabled;
index 64cb6ba4bc42d9ebb19f6dedb1272a36e094185d..24a12c453095fa762125a45aa9f5f50820c20ac7 100644 (file)
@@ -695,7 +695,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
                __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
 
-       return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
+       return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
 }
 
 /* Encoder */
index d5cbc47835bfa021252383e0ecacaf30dd66a279..35338ed1820996383ee1ef8afa717b004aa4e3f9 100644 (file)
@@ -223,7 +223,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
                        priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
                                                OSD_COLOR_MATRIX_16_RGB565;
                        break;
-               };
+               }
        }
 
        switch (fb->format->format) {
index fdf26dac9fa8ed8f7c0f9b56e83b48b790f5bc1e..0eb86943a35889e3afc9f786814cc89e07f4dcd5 100644 (file)
@@ -725,6 +725,13 @@ meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
        /* In DMT mode, path after PLL is always /10 */
        freq *= 10;
 
+       /* Check against soc revision/package limits */
+       if (priv->limits) {
+               if (priv->limits->max_hdmi_phy_freq &&
+                   freq > priv->limits->max_hdmi_phy_freq)
+                       return MODE_CLOCK_HIGH;
+       }
+
        if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
                return MODE_OK;
 
@@ -762,7 +769,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
 }
 
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq,
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
                              unsigned int vclk_freq)
 {
        int i;
@@ -770,6 +777,13 @@ meson_vclk_vic_supported_freq(unsigned int phy_freq,
        DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
                         phy_freq, vclk_freq);
 
+       /* Check against soc revision/package limits */
+       if (priv->limits) {
+               if (priv->limits->max_hdmi_phy_freq &&
+                   phy_freq > priv->limits->max_hdmi_phy_freq)
+                       return MODE_CLOCK_HIGH;
+       }
+
        for (i = 0 ; params[i].pixel_freq ; ++i) {
                DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
                                 i, params[i].pixel_freq,
index aed0ab2efa71dbf349cc1c365c27542666ecb754..60617aaf18dd1cedfab315cecb7d435e0d121e41 100644 (file)
@@ -25,7 +25,8 @@ enum {
 enum drm_mode_status
 meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+                             unsigned int vclk_freq);
 
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
                      unsigned int phy_freq, unsigned int vclk_freq,
index d491edd317ff31c6ab97baecf47d253f4e4c4237..aebc9ce43d551a82716f1aeba1dc36d5e1b3eac1 100644 (file)
@@ -260,7 +260,7 @@ int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                            uint32_t handle, uint32_t width, uint32_t height)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct drm_gem_object *obj;
        struct drm_gem_vram_object *gbo = NULL;
        int ret;
@@ -307,7 +307,7 @@ err_drm_gem_object_put_unlocked:
 
 int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
-       struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
 
        /* Our origin is at (64,64) */
        x += 64;
index 7a5bad2f57d70bbe838a46c8e29f800e4466f313..c2f0e4b40b0527fd4dccf85a22fa89008c3f2211 100644 (file)
@@ -77,6 +77,8 @@ static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_mgag200_driver_unload;
 
+       drm_fbdev_generic_setup(dev, 0);
+
        return 0;
 
 err_mgag200_driver_unload:
@@ -118,7 +120,7 @@ int mgag200_driver_dumb_create(struct drm_file *file,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        unsigned long pg_align;
 
        if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
index 9691252d6233ff965e32ada40415053f61f5b74b..d9b7e96b214f8a0fc604b6dc5abdda1b34092e4d 100644 (file)
@@ -104,11 +104,6 @@ struct mga_crtc {
        bool enabled;
 };
 
-struct mga_mode_info {
-       bool mode_config_initialized;
-       struct mga_crtc *crtc;
-};
-
 struct mga_i2c_chan {
        struct i2c_adapter adapter;
        struct drm_device *dev;
@@ -160,17 +155,14 @@ struct mga_device {
        void __iomem                    *rmmio;
 
        struct mga_mc                   mc;
-       struct mga_mode_info            mode_info;
 
        struct mga_cursor cursor;
 
        size_t vram_fb_available;
 
        bool                            suspended;
-       int                             num_crtc;
        enum mga_type                   type;
        int                             has_sdram;
-       struct drm_display_mode         mode;
 
        int bpp_shifts[4];
 
@@ -179,9 +171,15 @@ struct mga_device {
        /* SE model number stored in reg 0x1e24 */
        u32 unique_rev_id;
 
+       struct mga_connector connector;
        struct drm_encoder encoder;
 };
 
+static inline struct mga_device *to_mga_device(struct drm_device *dev)
+{
+       return dev->dev_private;
+}
+
 static inline enum mga_type
 mgag200_type_from_driver_data(kernel_ulong_t driver_data)
 {
@@ -196,7 +194,6 @@ mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
 
                                /* mgag200_mode.c */
 int mgag200_modeset_init(struct mga_device *mdev);
-void mgag200_modeset_fini(struct mga_device *mdev);
 
                                /* mgag200_main.c */
 int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
index 9f4635916d32252833af5664e02fdf7716a97d32..09731e614e46d72b487ed2aca62b3509a74c5f40 100644 (file)
@@ -61,34 +61,34 @@ static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
 static void mga_gpio_setsda(void *data, int state)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        mga_i2c_set(mdev, i2c->data, state);
 }
 
 static void mga_gpio_setscl(void *data, int state)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        mga_i2c_set(mdev, i2c->clock, state);
 }
 
 static int mga_gpio_getsda(void *data)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
 }
 
 static int mga_gpio_getscl(void *data)
 {
        struct mga_i2c_chan *i2c = data;
-       struct mga_device *mdev = i2c->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(i2c->dev);
        return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
 }
 
 struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct mga_i2c_chan *i2c;
        int ret;
        int data, clock;
index e278b6a547bde6fb8e3b8e3b5f7b93f9e8348d56..86df799fd38c51c3fc406e9690f7520536eb83fc 100644 (file)
 
 #include <linux/pci.h>
 
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
 #include "mgag200_drv.h"
 
-static const struct drm_mode_config_funcs mga_mode_funcs = {
-       .fb_create = drm_gem_fb_create
-};
-
 static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 {
        int offset;
@@ -66,51 +59,54 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 /* Map the framebuffer from the card and configure the core */
 static int mga_vram_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        void __iomem *mem;
 
        /* BAR 0 is VRAM */
-       mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
-       mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+       mdev->mc.vram_base = pci_resource_start(dev->pdev, 0);
+       mdev->mc.vram_window = pci_resource_len(dev->pdev, 0);
 
-       if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
-                               "mgadrmfb_vram")) {
+       if (!devm_request_mem_region(dev->dev, mdev->mc.vram_base,
+                                    mdev->mc.vram_window, "mgadrmfb_vram")) {
                DRM_ERROR("can't reserve VRAM\n");
                return -ENXIO;
        }
 
-       mem = pci_iomap(mdev->dev->pdev, 0, 0);
+       mem = pci_iomap(dev->pdev, 0, 0);
        if (!mem)
                return -ENOMEM;
 
        mdev->mc.vram_size = mga_probe_vram(mdev, mem);
 
-       pci_iounmap(mdev->dev->pdev, mem);
+       pci_iounmap(dev->pdev, mem);
 
        return 0;
 }
 
-static int mgag200_device_init(struct drm_device *dev,
-                              uint32_t flags)
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev;
        int ret, option;
 
+       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+       if (mdev == NULL)
+               return -ENOMEM;
+       dev->dev_private = (void *)mdev;
+       mdev->dev = dev;
+
        mdev->flags = mgag200_flags_from_driver_data(flags);
        mdev->type = mgag200_type_from_driver_data(flags);
 
-       /* Hardcode the number of CRTCs to 1 */
-       mdev->num_crtc = 1;
-
        pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
        mdev->has_sdram = !(option & (1 << 14));
 
        /* BAR 0 is the framebuffer, BAR 1 contains registers */
-       mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
-       mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+       mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
+       mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
 
-       if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
-                               "mgadrmfb_mmio")) {
-               DRM_ERROR("can't reserve mmio registers\n");
+       if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
+                                    mdev->rmmio_size, "mgadrmfb_mmio")) {
+               drm_err(dev, "can't reserve mmio registers\n");
                return -ENOMEM;
        }
 
@@ -121,90 +117,43 @@ static int mgag200_device_init(struct drm_device *dev,
        /* stash G200 SE model number for later use */
        if (IS_G200_SE(mdev)) {
                mdev->unique_rev_id = RREG32(0x1e24);
-               DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
-                         mdev->unique_rev_id);
+               drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+                       mdev->unique_rev_id);
        }
 
        ret = mga_vram_init(mdev);
        if (ret)
                return ret;
 
-       mdev->bpp_shifts[0] = 0;
-       mdev->bpp_shifts[1] = 1;
-       mdev->bpp_shifts[2] = 0;
-       mdev->bpp_shifts[3] = 2;
-       return 0;
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
-{
-       struct mga_device *mdev;
-       int r;
-
-       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
-       if (mdev == NULL)
-               return -ENOMEM;
-       dev->dev_private = (void *)mdev;
-       mdev->dev = dev;
-
-       r = mgag200_device_init(dev, flags);
-       if (r) {
-               dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
-               return r;
-       }
-       r = mgag200_mm_init(mdev);
-       if (r)
+       ret = mgag200_mm_init(mdev);
+       if (ret)
                goto err_mm;
 
-       drm_mode_config_init(dev);
-       dev->mode_config.funcs = (void *)&mga_mode_funcs;
-       if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
-               dev->mode_config.preferred_depth = 16;
-       else
-               dev->mode_config.preferred_depth = 32;
-       dev->mode_config.prefer_shadow = 1;
-
-       r = mgag200_modeset_init(mdev);
-       if (r) {
-               dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
-               goto err_modeset;
+       ret = mgag200_modeset_init(mdev);
+       if (ret) {
+               drm_err(dev, "Fatal error during modeset init: %d\n", ret);
+               goto err_mgag200_mm_fini;
        }
 
-       r = mgag200_cursor_init(mdev);
-       if (r)
-               dev_warn(&dev->pdev->dev,
-                       "Could not initialize cursors. Not doing hardware cursors.\n");
-
-       r = drm_fbdev_generic_setup(mdev->dev, 0);
-       if (r)
-               goto err_modeset;
+       ret = mgag200_cursor_init(mdev);
+       if (ret)
+               drm_err(dev, "Could not initialize cursors. Not doing hardware cursors.\n");
 
        return 0;
 
-err_modeset:
-       drm_mode_config_cleanup(dev);
-       mgag200_cursor_fini(mdev);
+err_mgag200_mm_fini:
        mgag200_mm_fini(mdev);
 err_mm:
        dev->dev_private = NULL;
-
-       return r;
+       return ret;
 }
 
 void mgag200_driver_unload(struct drm_device *dev)
 {
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
 
        if (mdev == NULL)
                return;
-       mgag200_modeset_fini(mdev);
-       drm_mode_config_cleanup(dev);
        mgag200_cursor_fini(mdev);
        mgag200_mm_fini(mdev);
        dev->dev_private = NULL;
index d90e83959fca17616474917257ec1fdd0923cd2d..5f4ac36a97760398cd32e1565797e17ca6b3247f 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -28,7 +29,7 @@
 static void mga_crtc_load_lut(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        struct drm_framebuffer *fb = crtc->primary->fb;
        u16 *r_ptr, *g_ptr, *b_ptr;
        int i;
@@ -728,7 +729,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
 
 static void mga_g200wb_prepare(struct drm_crtc *crtc)
 {
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
        u8 tmp;
        int iter_max;
 
@@ -783,7 +784,7 @@ static void mga_g200wb_prepare(struct drm_crtc *crtc)
 static void mga_g200wb_commit(struct drm_crtc *crtc)
 {
        u8 tmp;
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
 
        /* 1- The first step is to ensure that the vrsten and hrsten are set */
        WREG8(MGAREG_CRTCEXT_INDEX, 1);
@@ -833,7 +834,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
  */
 static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
-       struct mga_device *mdev = crtc->dev->dev_private;
+       struct mga_device *mdev = to_mga_device(crtc->dev);
        u32 addr;
        int count;
        u8 crtcext0;
@@ -902,7 +903,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                                int x, int y, struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        const struct drm_framebuffer *fb = crtc->primary->fb;
        int hdisplay, hsyncstart, hsyncend, htotal;
        int vdisplay, vsyncstart, vsyncend, vtotal;
@@ -1135,9 +1136,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
        WREG8(MGA_MISC_OUT, misc);
 
-       if (adjusted_mode)
-               memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
-
        mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
 
        /* reset tagfifo */
@@ -1263,7 +1261,7 @@ static int mga_resume(struct drm_crtc *crtc)
 static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        u8 seq1 = 0, crtcext1 = 0;
 
        switch (mode) {
@@ -1317,7 +1315,7 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
 static void mga_crtc_prepare(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        u8 tmp;
 
        /*      mga_resume(crtc);*/
@@ -1353,7 +1351,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
 static void mga_crtc_commit(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct mga_device *mdev = dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        u8 tmp;
 
@@ -1433,6 +1431,7 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
 /* CRTC setup */
 static void mga_crtc_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        struct mga_crtc *mga_crtc;
 
        mga_crtc = kzalloc(sizeof(struct mga_crtc) +
@@ -1442,14 +1441,17 @@ static void mga_crtc_init(struct mga_device *mdev)
        if (mga_crtc == NULL)
                return;
 
-       drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+       drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
-       mdev->mode_info.crtc = mga_crtc;
 
        drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
 }
 
+/*
+ * Connector
+ */
+
 static int mga_vga_get_modes(struct drm_connector *connector)
 {
        struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1495,7 +1497,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct drm_device *dev = connector->dev;
-       struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+       struct mga_device *mdev = to_mga_device(dev);
        int bpp = 32;
 
        if (IS_G200_SE(mdev)) {
@@ -1574,7 +1576,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
        struct mga_connector *mga_connector = to_mga_connector(connector);
        mgag200_i2c_destroy(mga_connector->i2c);
        drm_connector_cleanup(connector);
-       kfree(connector);
 }
 
 static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
@@ -1588,70 +1589,96 @@ static const struct drm_connector_funcs mga_vga_connector_funcs = {
        .destroy = mga_connector_destroy,
 };
 
-static struct drm_connector *mga_vga_init(struct drm_device *dev)
+static int mgag200_vga_connector_init(struct mga_device *mdev)
 {
-       struct drm_connector *connector;
-       struct mga_connector *mga_connector;
-
-       mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
-       if (!mga_connector)
-               return NULL;
-
-       connector = &mga_connector->base;
-       mga_connector->i2c = mgag200_i2c_create(dev);
-       if (!mga_connector->i2c)
-               DRM_ERROR("failed to add ddc bus\n");
+       struct drm_device *dev = mdev->dev;
+       struct mga_connector *mconnector = &mdev->connector;
+       struct drm_connector *connector = &mconnector->base;
+       struct mga_i2c_chan *i2c;
+       int ret;
 
-       drm_connector_init_with_ddc(dev, connector,
-                                   &mga_vga_connector_funcs,
-                                   DRM_MODE_CONNECTOR_VGA,
-                                   &mga_connector->i2c->adapter);
+       i2c = mgag200_i2c_create(dev);
+       if (!i2c)
+               drm_warn(dev, "failed to add DDC bus\n");
 
+       ret = drm_connector_init_with_ddc(dev, connector,
+                                         &mga_vga_connector_funcs,
+                                         DRM_MODE_CONNECTOR_VGA,
+                                         &i2c->adapter);
+       if (ret)
+               goto err_mgag200_i2c_destroy;
        drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
 
-       drm_connector_register(connector);
+       mconnector->i2c = i2c;
 
-       return connector;
+       return 0;
+
+err_mgag200_i2c_destroy:
+       mgag200_i2c_destroy(i2c);
+       return ret;
 }
 
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+       .fb_create = drm_gem_fb_create
+};
+
+static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+{
+       if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
+               return 16;
+       else
+               return 32;
+}
 
 int mgag200_modeset_init(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
        struct drm_encoder *encoder = &mdev->encoder;
-       struct drm_connector *connector;
+       struct drm_connector *connector = &mdev->connector.base;
        int ret;
 
-       mdev->mode_info.mode_config_initialized = true;
+       mdev->bpp_shifts[0] = 0;
+       mdev->bpp_shifts[1] = 1;
+       mdev->bpp_shifts[2] = 0;
+       mdev->bpp_shifts[3] = 2;
+
+       ret = drmm_mode_config_init(dev);
+       if (ret) {
+               drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
+                       ret);
+               return ret;
+       }
+
+       dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+       dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
 
-       mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
-       mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+       dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
+       dev->mode_config.prefer_shadow = 1;
 
-       mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+       dev->mode_config.fb_base = mdev->mc.vram_base;
+
+       dev->mode_config.funcs = &mgag200_mode_config_funcs;
 
        mga_crtc_init(mdev);
 
-       ret = drm_simple_encoder_init(mdev->dev, encoder,
-                                     DRM_MODE_ENCODER_DAC);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
        if (ret) {
-               drm_err(mdev->dev,
+               drm_err(dev,
                        "drm_simple_encoder_init() failed, error %d\n",
                        ret);
                return ret;
        }
        encoder->possible_crtcs = 0x1;
 
-       connector = mga_vga_init(mdev->dev);
-       if (!connector) {
-               DRM_ERROR("mga_vga_init failed\n");
-               return -1;
+       ret = mgag200_vga_connector_init(mdev);
+       if (ret) {
+               drm_err(dev,
+                       "mgag200_vga_connector_init() failed, error %d\n",
+                       ret);
+               return ret;
        }
 
        drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
-
-void mgag200_modeset_fini(struct mga_device *mdev)
-{
-
-}
index 075ecce4b5e06a22396c9db56ee13992dcd33ce2..8cae2ca4af6ba4e22a504771f2139b5628245e84 100644 (file)
@@ -148,27 +148,19 @@ reset_set(void *data, u64 val)
 DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
 
 
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
 {
        struct drm_device *dev;
-       int ret;
 
        if (!minor)
-               return 0;
+               return;
 
        dev = minor->dev;
 
-       ret = drm_debugfs_create_files(a5xx_debugfs_list,
-                       ARRAY_SIZE(a5xx_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(a5xx_debugfs_list,
+                                ARRAY_SIZE(a5xx_debugfs_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
                            &reset_fops);
-
-       return 0;
 }
index 833468ce6b6d7bb811f17e291c25b006bde45555..54868d4e3958f318dc67dd48506c04a41969a49e 100644 (file)
@@ -41,7 +41,7 @@ struct a5xx_gpu {
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
 
 #ifdef CONFIG_DEBUG_FS
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
 #endif
 
 /*
index 47b989834af166410c10921b0ed4f0980c42a2b5..c902c6503675f9e9cebd3e2d536e0352f146b685 100644 (file)
@@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = {
 
 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(mdp5_debugfs_list,
-                       ARRAY_SIZE(mdp5_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(mdp5_debugfs_list,
+                                ARRAY_SIZE(mdp5_debugfs_list),
+                                minor->debugfs_root, minor);
 
        return 0;
 }
index 1c74381a4fc9d3eaacc4ffa6f900160db8f3096d..ee2e270f464c1d7b664abff78f9aea390e258d2a 100644 (file)
@@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev)
        return ret;
 }
 
-int msm_debugfs_init(struct drm_minor *minor)
+void msm_debugfs_init(struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
        struct msm_drm_private *priv = dev->dev_private;
-       int ret;
-
-       ret = drm_debugfs_create_files(msm_debugfs_list,
-                       ARRAY_SIZE(msm_debugfs_list),
-                       minor->debugfs_root, minor);
 
-       if (ret) {
-               DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(msm_debugfs_list,
+                                ARRAY_SIZE(msm_debugfs_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
                dev, &msm_gpu_fops);
 
-       if (priv->kms && priv->kms->funcs->debugfs_init) {
-               ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
-               if (ret)
-                       return ret;
-       }
-
-       return ret;
+       if (priv->kms && priv->kms->funcs->debugfs_init)
+               priv->kms->funcs->debugfs_init(priv->kms, minor);
 }
 #endif
 
index 2b91f8c178ad37d46ac81fb7039f4c6e85588af3..ef58f66abbb341eccfbfeff9d759141e30ccc937 100644 (file)
@@ -8,7 +8,7 @@
 #define __MSM_DEBUGFS_H__
 
 #ifdef CONFIG_DEBUG_FS
-int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_init(struct drm_minor *minor);
 #endif
 
 #endif /* __MSM_DEBUGFS_H__ */
index be5bc2e8425c579de32cd5015e3cc71bf005c1a0..6ccae4ba905cc8c9407a32b742d5573801c8578a 100644 (file)
@@ -57,7 +57,7 @@ struct msm_gpu_funcs {
        void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
                        struct drm_printer *p);
        /* for generation specific debugfs: */
-       int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+       void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
 #endif
        unsigned long (*gpu_busy)(struct msm_gpu *gpu);
        struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
index 7a62fa04272daa7d99a55e8f626ce3310a8fbcf2..49e57fba4925ed2d673c6885949e6b3f9f39bdbe 100644 (file)
@@ -1,8 +1,10 @@
+NOUVEAU_PATH ?= $(srctree)
+
 # SPDX-License-Identifier: MIT
-ccflags-y += -I $(srctree)/$(src)/include
-ccflags-y += -I $(srctree)/$(src)/include/nvkm
-ccflags-y += -I $(srctree)/$(src)/nvkm
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)
 
 # NVKM - HW resource manager
 #- code also used by various userspace tools/tests
index 1f08de4241e01b246d86d85ab4658dd56d368f03..2de589caf5081dd8703c27b80d86457700e6350c 100644 (file)
@@ -605,15 +605,16 @@ static int
 nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 {
        struct nv04_display *disp = nv04_display(crtc->dev);
-       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
+       struct drm_framebuffer *fb = crtc->primary->fb;
+       struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        int ret;
 
-       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret == 0) {
                if (disp->image[nv_crtc->index])
                        nouveau_bo_unpin(disp->image[nv_crtc->index]);
-               nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+               nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
        }
 
        return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+       struct nouveau_bo *nvbo;
        struct drm_framebuffer *drm_fb;
-       struct nouveau_framebuffer *fb;
        int arb_burst, arb_lwm;
 
        NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
         */
        if (atomic) {
                drm_fb = passed_fb;
-               fb = nouveau_framebuffer(passed_fb);
        } else {
                drm_fb = crtc->primary->fb;
-               fb = nouveau_framebuffer(crtc->primary->fb);
        }
 
-       nv_crtc->fb.offset = fb->nvbo->bo.offset;
+       nvbo = nouveau_gem_object(drm_fb->obj[0]);
+       nv_crtc->fb.offset = nvbo->bo.offset;
 
        if (nv_crtc->lut.depth != drm_fb->format->depth) {
                nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
        struct drm_device *dev = crtc->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
-       struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+       struct drm_framebuffer *old_fb = crtc->primary->fb;
+       struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+       struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
        struct nv04_page_flip_state *s;
        struct nouveau_channel *chan;
        struct nouveau_cli *cli;
index 44ee82d0c9b6a6af7b9e00ad272a3d70814c5744..0f4ebefed1fd0db40ea3fcb1a1b1a419f4834641 100644 (file)
@@ -30,6 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_bo.h"
+#include "nouveau_gem.h"
 
 #include <nvif/if0004.h>
 
@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
 
        /* Un-pin FB and cursors so they'll be evicted to system memory. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_framebuffer *nouveau_fb;
+               struct drm_framebuffer *fb = crtc->primary->fb;
+               struct nouveau_bo *nvbo;
 
-               nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-               if (!nouveau_fb || !nouveau_fb->nvbo)
+               if (!fb || !fb->obj[0])
                        continue;
-
-               nouveau_bo_unpin(nouveau_fb->nvbo);
+               nvbo = nouveau_gem_object(fb->obj[0]);
+               nouveau_bo_unpin(nvbo);
        }
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -104,13 +105,13 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
 
        /* Re-pin FB/cursors. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_framebuffer *nouveau_fb;
+               struct drm_framebuffer *fb = crtc->primary->fb;
+               struct nouveau_bo *nvbo;
 
-               nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-               if (!nouveau_fb || !nouveau_fb->nvbo)
+               if (!fb || !fb->obj[0])
                        continue;
-
-               ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+               nvbo = nouveau_gem_object(fb->obj[0]);
+               ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
                if (ret)
                        NV_ERROR(drm, "Could not pin framebuffer\n");
        }
index a3a0a73ae8abd08396f2bbaa21e1f1166f027c7e..6248fd1dbc6ddd1b3686a3f6f4aabbe5f2668f40 100644 (file)
@@ -31,6 +31,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_connector.h"
 #include "nouveau_display.h"
+#include "nouveau_gem.h"
 #include "nvreg.h"
 #include "disp.h"
 
@@ -120,9 +121,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        struct nvif_object *dev = &drm->client.device.object;
        struct nouveau_plane *nv_plane =
                container_of(plane, struct nouveau_plane, base);
-       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_bo *cur = nv_plane->cur;
+       struct nouveau_bo *nvbo;
        bool flip = nv_plane->flip;
        int soff = NV_PCRTC0_SIZE * nv_crtc->index;
        int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
@@ -140,17 +141,18 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (ret)
                return ret;
 
-       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+       nvbo = nouveau_gem_object(fb->obj[0]);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                return ret;
 
-       nv_plane->cur = nv_fb->nvbo;
+       nv_plane->cur = nvbo;
 
        nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
        nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
 
        nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
-       nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+       nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->bo.offset);
        nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
        nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
        nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
@@ -172,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (format & NV_PVIDEO_FORMAT_PLANAR) {
                nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
                nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
-                       nv_fb->nvbo->bo.offset + fb->offsets[1]);
+                       nvbo->bo.offset + fb->offsets[1]);
        }
        nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
        nvif_wr32(dev, NV_PVIDEO_STOP, 0);
@@ -368,8 +370,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
        struct nouveau_plane *nv_plane =
                container_of(plane, struct nouveau_plane, base);
-       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
        struct nouveau_bo *cur = nv_plane->cur;
+       struct nouveau_bo *nvbo;
        uint32_t overlay = 1;
        int brightness = (nv_plane->brightness - 512) * 62 / 512;
        int ret, i;
@@ -384,11 +386,12 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (ret)
                return ret;
 
-       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+       nvbo = nouveau_gem_object(fb->obj[0]);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                return ret;
 
-       nv_plane->cur = nv_fb->nvbo;
+       nv_plane->cur = nvbo;
 
        nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
        nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
@@ -396,7 +399,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        for (i = 0; i < 2; i++) {
                nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
-                         nv_fb->nvbo->bo.offset);
+                         nvbo->bo.offset);
                nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
                          fb->pitches[0]);
                nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
index ee782151d332296d99ece6ae9f2dbbac1c34ebdd..511258bfbcbc4dbc7e593271afc5111abcdc56f3 100644 (file)
@@ -263,7 +263,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
        struct nv50_disp_base_channel_dma_v0 args = {
                .head = head,
        };
-       struct nv50_disp *disp = nv50_disp(drm->dev);
+       struct nouveau_display *disp = nouveau_display(drm->dev);
+       struct nv50_disp *disp50 = nv50_disp(drm->dev);
        struct nv50_wndw *wndw;
        int ret;
 
@@ -273,9 +274,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
        if (*pwndw = wndw, ret)
                return ret;
 
-       ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+       ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
                               &oclass, head, &args, sizeof(args),
-                              disp->sync->bo.offset, &wndw->wndw);
+                              disp50->sync->bo.offset, &wndw->wndw);
        if (ret) {
                NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
                return ret;
index ff94f3f6f264ef0c01e8c5811e104546f621a8d1..99157dc94d2350f1a6be09ade0a1bcd9138ea20b 100644 (file)
@@ -2,6 +2,7 @@
 #define __NV50_KMS_CORE_H__
 #include "disp.h"
 #include "atom.h"
+#include <nouveau_encoder.h>
 
 struct nv50_core {
        const struct nv50_core_func *func;
@@ -15,6 +16,7 @@ void nv50_core_del(struct nv50_core **);
 struct nv50_core_func {
        void (*init)(struct nv50_core *);
        void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+       int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
        int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
                              struct nvif_device *);
        void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -27,6 +29,9 @@ struct nv50_core_func {
        const struct nv50_outp_func {
                void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
                             struct nv50_head_atom *);
+               /* XXX: Only used by SORs and PIORs for now */
+               void (*get_caps)(struct nv50_disp *,
+                                struct nouveau_encoder *, int or);
        } *dac, *pior, *sor;
 };
 
@@ -35,6 +40,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
                  struct nv50_core **);
 void core507d_init(struct nv50_core *);
 void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void core507d_update(struct nv50_core *, u32 *, bool);
 
@@ -51,6 +57,7 @@ extern const struct nv50_outp_func sor907d;
 int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void corec37d_update(struct nv50_core *, u32 *, bool);
 void corec37d_wndw_owner(struct nv50_core *);
index c5152c39c684dc1e50896e88eef458d38e162f88..e341f572c2696e3672109ed1efd47cc70f34380e 100644 (file)
@@ -62,6 +62,20 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
        nouveau_bo_wr32(bo, offset / 4, 0x00000000);
 }
 
+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+       u32 *push = evo_wait(&disp->core->chan, 2);
+
+       if (push) {
+               evo_mthd(push, 0x008c, 1);
+               evo_data(push, 0x0);
+               evo_kick(push, &disp->core->chan);
+       }
+
+       return 0;
+}
+
 void
 core507d_init(struct nv50_core *core)
 {
@@ -77,6 +91,7 @@ static const struct nv50_core_func
 core507d = {
        .init = core507d_init,
        .ntfy_init = core507d_ntfy_init,
+       .caps_init = core507d_caps_init,
        .ntfy_wait_done = core507d_ntfy_wait_done,
        .update = core507d_update,
        .head = &head507d,
index 6123a068f8364ac49012d4caf805fb082cd04b14..2e0c1c536afebf44a30c4ad4335cda93467d7d95 100644 (file)
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core827d = {
        .init = core507d_init,
        .ntfy_init = core507d_ntfy_init,
+       .caps_init = core507d_caps_init,
        .ntfy_wait_done = core507d_ntfy_wait_done,
        .update = core507d_update,
        .head = &head827d,
index ef822f8134355a9a4e4915127b93d43f2e717905..2716298326299cf07c93cf5df1a13e54aea9a2c6 100644 (file)
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core907d = {
        .init = core507d_init,
        .ntfy_init = core507d_ntfy_init,
+       .caps_init = core507d_caps_init,
        .ntfy_wait_done = core507d_ntfy_wait_done,
        .update = core507d_update,
        .head = &head907d,
index 392338df5bfdcaa2170196031f6fa573f6d88312..5cc072d4c30fef18b32bebe1ee8538e5fa5d2351 100644 (file)
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core917d = {
        .init = core507d_init,
        .ntfy_init = core507d_ntfy_init,
+       .caps_init = core507d_caps_init,
        .ntfy_wait_done = core507d_ntfy_wait_done,
        .update = core507d_update,
        .head = &head917d,
index c03cb987856bd3d2a75543ec88bb4b480674a989..e0c8811fb8e45ca05d27d4e8afa2c4fe49fc0060 100644 (file)
@@ -22,6 +22,7 @@
 #include "core.h"
 #include "head.h"
 
+#include <nvif/class.h>
 #include <nouveau_bo.h>
 
 #include <nvif/timer.h>
@@ -87,6 +88,30 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
        nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
 }
 
+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+       int ret;
+
+       ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
+                              NULL, 0, &disp->caps);
+       if (ret) {
+               NV_ERROR(drm,
+                        "Failed to init notifier caps region: %d\n",
+                        ret);
+               return ret;
+       }
+
+       ret = nvif_object_map(&disp->caps, NULL, 0);
+       if (ret) {
+               NV_ERROR(drm,
+                        "Failed to map notifier caps region: %d\n",
+                        ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static void
 corec37d_init(struct nv50_core *core)
 {
@@ -111,6 +136,7 @@ static const struct nv50_core_func
 corec37d = {
        .init = corec37d_init,
        .ntfy_init = corec37d_ntfy_init,
+       .caps_init = corec37d_caps_init,
        .ntfy_wait_done = corec37d_ntfy_wait_done,
        .update = corec37d_update,
        .wndw.owner = corec37d_wndw_owner,
index 147adcd609378829f25dc5a1f4449703ebe9e713..10ba9e9e4ae6b4889f65b884ea4dcffc2a01824b 100644 (file)
@@ -46,6 +46,7 @@ static const struct nv50_core_func
 corec57d = {
        .init = corec57d_init,
        .ntfy_init = corec37d_ntfy_init,
+       .caps_init = corec37d_caps_init,
        .ntfy_wait_done = corec37d_ntfy_wait_done,
        .update = corec37d_update,
        .wndw.owner = corec37d_wndw_owner,
index 8c5cf096f69bb572e714e9800e8815581e65caef..658a200ab616e5becc6cf915ffcde9789296eeca 100644 (file)
@@ -32,7 +32,7 @@
 bool
 curs507a_space(struct nv50_wndw *wndw)
 {
-       nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+       nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
                if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
                        return true;
        );
index 6be9df1820c51366e016c4cdd7c2f4ff3850689d..7622490d86024cbe92e61c8ec0bf39a80aad1b48 100644 (file)
@@ -482,15 +482,16 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
  * audio component binding for ELD notification
  */
 static void
-nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+                               int dev_id)
 {
        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
-                                                port, -1);
+                                                port, dev_id);
 }
 
 static int
-nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
                             bool *enabled, unsigned char *buf, int max_bytes)
 {
        struct drm_device *drm_dev = dev_get_drvdata(kdev);
@@ -506,7 +507,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
                nv_encoder = nouveau_encoder(encoder);
                nv_connector = nouveau_encoder_connector_get(nv_encoder);
                nv_crtc = nouveau_crtc(encoder->crtc);
-               if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+               if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+                   nv_crtc->index != dev_id)
                        continue;
                *enabled = drm_detect_monitor_audio(nv_connector->edid);
                if (*enabled) {
@@ -600,7 +602,8 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 
        nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 
-       nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+       nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+                                       nv_crtc->index);
 }
 
 static void
@@ -634,7 +637,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
        nvif_mthd(&disp->disp->object, 0, &args,
                  sizeof(args.base) + drm_eld_size(args.data));
 
-       nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+       nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+                                       nv_crtc->index);
 }
 
 /******************************************************************************
@@ -904,15 +908,9 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
        if (!state->duplicated) {
                const int clock = crtc_state->adjusted_mode.clock;
 
-               /*
-                * XXX: Since we don't use HDR in userspace quite yet, limit
-                * the bpc to 8 to save bandwidth on the topology. In the
-                * future, we'll want to properly fix this by dynamically
-                * selecting the highest possible bpc that would fit in the
-                * topology
-                */
-               asyh->or.bpc = min(connector->display_info.bpc, 8U);
-               asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+               asyh->or.bpc = connector->display_info.bpc;
+               asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+                                                   false);
        }
 
        slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
@@ -1058,7 +1056,14 @@ static enum drm_mode_status
 nv50_mstc_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
 {
-       return MODE_OK;
+       struct nv50_mstc *mstc = nv50_mstc(connector);
+       struct nouveau_encoder *outp = mstc->mstm->outp;
+
+       /* TODO: calculate the PBN from the dotclock and validate against the
+        * MSTB's max possible PBN
+        */
+
+       return nv50_dp_mode_valid(connector, outp, mode, NULL);
 }
 
 static int
@@ -1072,8 +1077,17 @@ nv50_mstc_get_modes(struct drm_connector *connector)
        if (mstc->edid)
                ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
 
-       if (!mstc->connector.display_info.bpc)
-               mstc->connector.display_info.bpc = 8;
+       /*
+        * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+        * to 8 to save bandwidth on the topology. In the future, we'll want
+        * to properly fix this by dynamically selecting the highest possible
+        * bpc that would fit in the topology
+        */
+       if (connector->display_info.bpc)
+               connector->display_info.bpc =
+                       clamp(connector->display_info.bpc, 6U, 8U);
+       else
+               connector->display_info.bpc = 8;
 
        if (mstc->native)
                drm_mode_destroy(mstc->connector.dev, mstc->native);
@@ -1123,8 +1137,10 @@ nv50_mstc_detect(struct drm_connector *connector,
                return connector_status_disconnected;
 
        ret = pm_runtime_get_sync(connector->dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(connector->dev->dev);
                return connector_status_disconnected;
+       }
 
        ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
                                     mstc->port);
@@ -1659,6 +1675,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;
+       struct nv50_disp *disp = nv50_disp(connector->dev);
        int type, ret;
 
        switch (dcbe->type) {
@@ -1685,10 +1702,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 
        drm_connector_attach_encoder(connector, encoder);
 
+       disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
        if (dcbe->type == DCB_OUTPUT_DP) {
-               struct nv50_disp *disp = nv50_disp(encoder->dev);
                struct nvkm_i2c_aux *aux =
                        nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
                if (aux) {
                        if (disp->disp->object.oclass < GF110_DISP) {
                                /* HW has no support for address-only
@@ -1801,7 +1820,9 @@ nv50_pior_func = {
 static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
-       struct nouveau_drm *drm = nouveau_drm(connector->dev);
+       struct drm_device *dev = connector->dev;
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nv50_disp *disp = nv50_disp(dev);
        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
        struct nvkm_i2c_bus *bus = NULL;
        struct nvkm_i2c_aux *aux = NULL;
@@ -1840,6 +1861,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
        drm_encoder_helper_add(encoder, &nv50_pior_help);
 
        drm_connector_attach_encoder(connector, encoder);
+
+       disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
        return 0;
 }
 
@@ -2369,7 +2393,8 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
        struct drm_encoder *encoder;
        struct drm_plane *plane;
 
-       core->func->init(core);
+       if (resume || runtime)
+               core->func->init(core);
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -2396,6 +2421,8 @@ nv50_display_destroy(struct drm_device *dev)
 
        nv50_audio_component_fini(nouveau_drm(dev));
 
+       nvif_object_unmap(&disp->caps);
+       nvif_object_fini(&disp->caps);
        nv50_core_del(&disp->core);
 
        nouveau_bo_unmap(disp->sync);
@@ -2456,6 +2483,22 @@ nv50_display_create(struct drm_device *dev)
        if (ret)
                goto out;
 
+       disp->core->func->init(disp->core);
+       if (disp->core->func->caps_init) {
+               ret = disp->core->func->caps_init(drm, disp);
+               if (ret)
+                       goto out;
+       }
+
+       /* Assign the correct format modifiers */
+       if (disp->disp->object.oclass >= TU102_DISP)
+               nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+       else
+       if (disp->disp->object.oclass >= GF110_DISP)
+               nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+       else
+               nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
        /* create crtc objects to represent the hw heads */
        if (disp->disp->object.oclass >= GV100_DISP)
                crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
@@ -2551,3 +2594,53 @@ out:
                nv50_display_destroy(dev);
        return ret;
 }
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ *            Log2(block height) ----------------------------+  *
+ *            Page Kind ----------------------------------+  |  *
+ *            Gob Height/Page Kind Generation ------+     |  |  *
+ *                          Sector layout -------+  |     |  |  *
+ *                          Compression ------+  |  |     |  |  */
+const u64 disp50xx_modifiers[] = { /*         |  |  |     |  |  */
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ *            Log2(block height) ----------------------------+  *
+ *            Page Kind ----------------------------------+  |  *
+ *            Gob Height/Page Kind Generation ------+     |  |  *
+ *                          Sector layout -------+  |     |  |  *
+ *                          Compression ------+  |  |     |  |  */
+const u64 disp90xx_modifiers[] = { /*         |  |  |     |  |  */
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
index d54fe00ac3a3c22e1e0fc06638899c156a4cb5c9..696e70a6b98b670e567058d185c5ea3c3b34b3e1 100644 (file)
@@ -9,6 +9,7 @@ struct nv50_msto;
 struct nv50_disp {
        struct nvif_disp *disp;
        struct nv50_core *core;
+       struct nvif_object caps;
 
 #define NV50_DISP_SYNC(c, o)                                ((c) * 0x040 + (o))
 #define NV50_DISP_CORE_NTFY                       NV50_DISP_SYNC(0      , 0x00)
@@ -78,6 +79,10 @@ void nv50_dmac_destroy(struct nv50_dmac *);
 u32 *evo_wait(struct nv50_dmac *, int nr);
 void evo_kick(u32 *, struct nv50_dmac *);
 
+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+
 #define evo_mthd(p, m, s) do {                                         \
        const u32 _m = (m), _s = (s);                                   \
        if (drm_debug_enabled(DRM_UT_KMS))                              \
index 00011ce109a629da6a1f5f47453e1ecc3f298aac..4a9a32b89f746f98a9970d26bf64906a27000fca 100644 (file)
@@ -168,14 +168,15 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
        struct nv50_head_mode *m = &asyh->mode;
        u32 *push;
-       if ((push = evo_wait(core, 12))) {
+       if ((push = evo_wait(core, 13))) {
                evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
                evo_data(push, (m->v.active  << 16) | m->h.active );
                evo_data(push, (m->v.synce   << 16) | m->h.synce  );
                evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
                evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
                evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-               evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+               evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+               evo_data(push, m->interlace);
                evo_data(push, m->clock * 1000);
                evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
                evo_data(push, m->clock * 1000);
index 938d910a1b1e4acd1a65900415367141d056d0fd..859131a8bc3c89fa377ba6b40033203123d5d10c 100644 (file)
@@ -173,14 +173,15 @@ headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
        struct nv50_head_mode *m = &asyh->mode;
        u32 *push;
-       if ((push = evo_wait(core, 12))) {
+       if ((push = evo_wait(core, 13))) {
                evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
                evo_data(push, (m->v.active  << 16) | m->h.active );
                evo_data(push, (m->v.synce   << 16) | m->h.synce  );
                evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
                evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
                evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-               evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+               evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+               evo_data(push, m->interlace);
                evo_data(push, m->clock * 1000);
                evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
                evo_data(push, m->clock * 1000);
index d2bac6a341dcb696204016f1c17fd00dd4357d77..45d8ce7d2c28318d289ba21477c5b980582dd256 100644 (file)
@@ -38,7 +38,15 @@ pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
        }
 }
 
+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+                 int or)
+{
+       outp->caps.dp_interlace = true;
+}
+
 const struct nv50_outp_func
 pior507d = {
        .ctrl = pior507d_ctrl,
+       .get_caps = pior507d_get_caps,
 };
index 5222fe6a9b21cd4ca1e7e064af14c2c3d94c8a82..9a59fa7da00dc4963fa9d0a66d31178a68cb54dd 100644 (file)
@@ -38,7 +38,14 @@ sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
        }
 }
 
+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+       outp->caps.dp_interlace = true;
+}
+
 const struct nv50_outp_func
 sor507d = {
        .ctrl = sor507d_ctrl,
+       .get_caps = sor507d_get_caps,
 };
index b0314ec11fb3bc121e30b36e9e9491b00f00e0ee..9577ccf1c809b1d9e2095f56a3500d155b2e134e 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "core.h"
 
+#include <nouveau_bo.h>
 #include <nvif/class.h>
 
 static void
@@ -35,7 +36,17 @@ sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
        }
 }
 
+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+       const int off = or * 2;
+       u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
+
+       outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
 const struct nv50_outp_func
 sor907d = {
        .ctrl = sor907d_ctrl,
+       .get_caps = sor907d_get_caps,
 };
index dff059241c5ddda510ca5636587605e129356ac6..c86ca955fdcd4b125f0203d5530d54c229ed5710 100644 (file)
@@ -33,7 +33,16 @@ sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
        }
 }
 
+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+       u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+       outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
 const struct nv50_outp_func
 sorc37d = {
        .ctrl = sorc37d_ctrl,
+       .get_caps = sorc37d_get_caps,
 };
index bb737f9281e692f2adf4a3c8e93d7081b022b7ea..e25ead56052cd8dfe6515822c8479dde737c929b 100644 (file)
@@ -29,6 +29,7 @@
 #include <drm/drm_fourcc.h>
 
 #include "nouveau_bo.h"
+#include "nouveau_gem.h"
 
 static void
 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -39,12 +40,13 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
 }
 
 static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
 {
-       struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+       struct nouveau_drm *drm = nouveau_drm(fb->dev);
        struct nv50_wndw_ctxdma *ctxdma;
-       const u8    kind = fb->nvbo->kind;
-       const u32 handle = 0xfb000000 | kind;
+       u32 handle;
+       u32 unused;
+       u8  kind;
        struct {
                struct nv_dma_v0 base;
                union {
@@ -56,6 +58,9 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
        u32 argc = sizeof(args.base);
        int ret;
 
+       nouveau_framebuffer_get_layout(fb, &unused, &kind);
+       handle = 0xfb000000 | kind;
+
        list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
                if (ctxdma->object.handle == handle)
                        return ctxdma;
@@ -234,16 +239,20 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                               struct nv50_wndw_atom *asyw,
                               struct nv50_head_atom *asyh)
 {
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+       struct drm_framebuffer *fb = asyw->state.fb;
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+       uint8_t kind;
+       uint32_t tile_mode;
        int ret;
 
        NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
 
-       if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
-               asyw->image.w = fb->base.width;
-               asyw->image.h = fb->base.height;
-               asyw->image.kind = fb->nvbo->kind;
+       if (fb != armw->state.fb || !armw->visible || modeset) {
+               nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+               asyw->image.w = fb->width;
+               asyw->image.h = fb->height;
+               asyw->image.kind = kind;
 
                ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
                if (ret) {
@@ -255,16 +264,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                if (asyw->image.kind) {
                        asyw->image.layout = 0;
                        if (drm->client.device.info.chipset >= 0xc0)
-                               asyw->image.blockh = fb->nvbo->mode >> 4;
+                               asyw->image.blockh = tile_mode >> 4;
                        else
-                               asyw->image.blockh = fb->nvbo->mode;
-                       asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+                               asyw->image.blockh = tile_mode;
+                       asyw->image.blocks[0] = fb->pitches[0] / 64;
                        asyw->image.pitch[0] = 0;
                } else {
                        asyw->image.layout = 1;
                        asyw->image.blockh = 0;
                        asyw->image.blocks[0] = 0;
-                       asyw->image.pitch[0] = fb->base.pitches[0];
+                       asyw->image.pitch[0] = fb->pitches[0];
                }
 
                if (!asyh->state.async_flip)
@@ -471,47 +480,50 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 static void
 nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
 {
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
+       struct nouveau_bo *nvbo;
 
        NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
        if (!old_state->fb)
                return;
 
-       nouveau_bo_unpin(fb->nvbo);
+       nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+       nouveau_bo_unpin(nvbo);
 }
 
 static int
 nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 {
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+       struct drm_framebuffer *fb = state->fb;
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+       struct nouveau_bo *nvbo;
        struct nv50_head_atom *asyh;
        struct nv50_wndw_ctxdma *ctxdma;
        int ret;
 
-       NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+       NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
        if (!asyw->state.fb)
                return 0;
 
-       ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+       nvbo = nouveau_gem_object(fb->obj[0]);
+       ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
        if (ret)
                return ret;
 
        if (wndw->ctxdma.parent) {
                ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
                if (IS_ERR(ctxdma)) {
-                       nouveau_bo_unpin(fb->nvbo);
+                       nouveau_bo_unpin(nvbo);
                        return PTR_ERR(ctxdma);
                }
 
                asyw->image.handle[0] = ctxdma->object.handle;
        }
 
-       asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
-       asyw->image.offset[0] = fb->nvbo->bo.offset;
+       asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+       asyw->image.offset[0] = nvbo->bo.offset;
 
        if (wndw->func->prepare) {
                asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
@@ -603,6 +615,29 @@ nv50_wndw_destroy(struct drm_plane *plane)
        kfree(wndw);
 }
 
+/* This function assumes the format has already been validated against the plane
+ * and the modifier was validated against the device-wides modifier list at FB
+ * creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+                                           u32 format, u64 modifier)
+{
+       struct nouveau_drm *drm = nouveau_drm(plane->dev);
+       uint8_t i;
+
+       if (drm->client.device.info.chipset < 0xc0) {
+               const struct drm_format_info *info = drm_format_info(format);
+               const uint8_t kind = (modifier >> 12) & 0xff;
+
+               if (!format) return false;
+
+               for (i = 0; i < info->num_planes; i++)
+                       if ((info->cpp[i] != 4) && kind != 0x70) return false;
+       }
+
+       return true;
+}
+
 const struct drm_plane_funcs
 nv50_wndw = {
        .update_plane = drm_atomic_helper_update_plane,
@@ -611,6 +646,7 @@ nv50_wndw = {
        .reset = nv50_wndw_reset,
        .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
        .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+       .format_mod_supported = nv50_plane_format_mod_supported,
 };
 
 static int
@@ -658,7 +694,8 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
        for (nformat = 0; format[nformat]; nformat++);
 
        ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
-                                      format, nformat, NULL,
+                                      format, nformat,
+                                      nouveau_display(dev)->format_modifiers,
                                       type, "%s-%d", name, index);
        if (ret) {
                kfree(*pwndw);
index 35c9c52fab263ec7e8c1b99114fbe2203da0c86a..1d64741595ba92b9e817947e4dbe548ce95e74bd 100644 (file)
@@ -173,6 +173,23 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
        return true;
 }
 
+/****************************************************************
+ *            Log2(block height) ----------------------------+  *
+ *            Page Kind ----------------------------------+  |  *
+ *            Gob Height/Page Kind Generation ------+     |  |  *
+ *                          Sector layout -------+  |     |  |  *
+ *                          Compression ------+  |  |     |  |  */
+const u64 wndwc57e_modifiers[] = { /*         |  |  |     |  |  */
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
 static const struct nv50_wndw_func
 wndwc57e = {
        .acquire = wndwc37e_acquire,
index 30659747ffe83e2ca130b35fbde938be1e938b8d..2c79beb41126fa75ff55c2479afdba27db841320 100644 (file)
@@ -89,6 +89,8 @@
 #define GV100_DISP                                    /* cl5070.h */ 0x0000c370
 #define TU102_DISP                                    /* cl5070.h */ 0x0000c570
 
+#define GV100_DISP_CAPS                                              0x0000c373
+
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
 
index 1218f28c14baba31cd93e7453de2694cda24e4d1..76288c682e9eaed70332e5ce0a19bf47b0faace2 100644 (file)
@@ -24,6 +24,8 @@ struct nvkm_subdev_func {
 };
 
 extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
+                    int index, struct nvkm_subdev **);
 void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
                      int index, struct nvkm_subdev *);
 void nvkm_subdev_del(struct nvkm_subdev **);
index fe3a10255c367b4738055769005afd14b3da7265..69a84d0197d0af2ce4544b564f9c9605e9da1e8c 100644 (file)
@@ -49,7 +49,6 @@ static struct nouveau_dsm_priv {
        bool optimus_flags_detected;
        bool optimus_skip_dsm;
        acpi_handle dhandle;
-       acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
 bool nouveau_is_optimus(void) {
@@ -212,37 +211,6 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
        .get_client_id = nouveau_dsm_get_client_id,
 };
 
-/*
- * Firmware supporting Windows 8 or later do not use _DSM to put the device into
- * D3cold, they instead rely on disabling power resources on the parent.
- */
-static bool nouveau_pr3_present(struct pci_dev *pdev)
-{
-       struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
-       struct acpi_device *parent_adev;
-
-       if (!parent_pdev)
-               return false;
-
-       if (!parent_pdev->bridge_d3) {
-               /*
-                * Parent PCI bridge is currently not power managed.
-                * Since userspace can change these afterwards to be on
-                * the safe side we stick with _DSM and prevent usage of
-                * _PR3 from the bridge.
-                */
-               pci_d3cold_disable(pdev);
-               return false;
-       }
-
-       parent_adev = ACPI_COMPANION(&parent_pdev->dev);
-       if (!parent_adev)
-               return false;
-
-       return parent_adev->power.flags.power_resources &&
-               acpi_has_method(parent_adev->handle, "_PR3");
-}
-
 static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
                                  bool *has_mux, bool *has_opt,
                                  bool *has_opt_flags, bool *has_pr3)
@@ -250,6 +218,16 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
        acpi_handle dhandle;
        bool supports_mux;
        int optimus_funcs;
+       struct pci_dev *parent_pdev;
+
+       *has_pr3 = false;
+       parent_pdev = pci_upstream_bridge(pdev);
+       if (parent_pdev) {
+               if (parent_pdev->bridge_d3)
+                       *has_pr3 = pci_pr3_present(parent_pdev);
+               else
+                       pci_d3cold_disable(pdev);
+       }
 
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
@@ -270,7 +248,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
        *has_mux = supports_mux;
        *has_opt = !!optimus_funcs;
        *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
-       *has_pr3 = false;
 
        if (optimus_funcs) {
                uint32_t result;
@@ -280,8 +257,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
                         (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
                         (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
                         (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
-
-               *has_pr3 = nouveau_pr3_present(pdev);
        }
 }
 
@@ -385,59 +360,6 @@ void nouveau_unregister_dsm_handler(void) {}
 void nouveau_switcheroo_optimus_dsm(void) {}
 #endif
 
-/* retrieve the ROM in 4k blocks */
-static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
-                           int offset, int len)
-{
-       acpi_status status;
-       union acpi_object rom_arg_elements[2], *obj;
-       struct acpi_object_list rom_arg;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-       rom_arg.count = 2;
-       rom_arg.pointer = &rom_arg_elements[0];
-
-       rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
-       rom_arg_elements[0].integer.value = offset;
-
-       rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
-       rom_arg_elements[1].integer.value = len;
-
-       status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
-       if (ACPI_FAILURE(status)) {
-               pr_info("failed to evaluate ROM got %s\n",
-                       acpi_format_exception(status));
-               return -ENODEV;
-       }
-       obj = (union acpi_object *)buffer.pointer;
-       len = min(len, (int)obj->buffer.length);
-       memcpy(bios+offset, obj->buffer.pointer, len);
-       kfree(buffer.pointer);
-       return len;
-}
-
-bool nouveau_acpi_rom_supported(struct device *dev)
-{
-       acpi_status status;
-       acpi_handle dhandle, rom_handle;
-
-       dhandle = ACPI_HANDLE(dev);
-       if (!dhandle)
-               return false;
-
-       status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
-       if (ACPI_FAILURE(status))
-               return false;
-
-       nouveau_dsm_priv.rom_handle = rom_handle;
-       return true;
-}
-
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-       return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
-}
-
 void *
 nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 {
index 1e6e8a8c045592d8a654e0b3ccd01730d5134949..330f9b837066ba5e415445ed38889574e79112d8 100644 (file)
@@ -10,8 +10,6 @@ bool nouveau_is_v1_dsm(void);
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };
@@ -19,8 +17,6 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif
 
index 9a9a7f5003d3f370fe5f23926a836fd7ad0a6f37..1b383ae0248f30ad3af6b2e0372cb3e1b6e413cf 100644 (file)
@@ -38,6 +38,7 @@
 #include "nouveau_reg.h"
 #include "nouveau_drv.h"
 #include "dispnv04/hw.h"
+#include "dispnv50/disp.h"
 #include "nouveau_acpi.h"
 
 #include "nouveau_display.h"
@@ -509,7 +510,11 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
        nv_connector->detected_encoder = nv_encoder;
 
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               connector->interlace_allowed = true;
+               if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+                       connector->interlace_allowed =
+                               nv_encoder->caps.dp_interlace;
+               else
+                       connector->interlace_allowed = true;
                connector->doublescan_allowed = true;
        } else
        if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1029,6 +1034,29 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
                return 112000 * duallink_scale;
 }
 
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+                             const unsigned min_clock,
+                             const unsigned max_clock,
+                             unsigned int *clock_out)
+{
+       unsigned int clock = mode->clock;
+
+       if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+           DRM_MODE_FLAG_3D_FRAME_PACKING)
+               clock *= 2;
+
+       if (clock < min_clock)
+               return MODE_CLOCK_LOW;
+       if (clock > max_clock)
+               return MODE_CLOCK_HIGH;
+
+       if (clock_out)
+               *clock_out = clock;
+
+       return MODE_OK;
+}
+
 static enum drm_mode_status
 nouveau_connector_mode_valid(struct drm_connector *connector,
                             struct drm_display_mode *mode)
@@ -1037,7 +1065,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
        struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
        struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
        unsigned min_clock = 25000, max_clock = min_clock;
-       unsigned clock = mode->clock;
 
        switch (nv_encoder->dcb->type) {
        case DCB_OUTPUT_LVDS:
@@ -1060,25 +1087,14 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
        case DCB_OUTPUT_TV:
                return get_slave_funcs(encoder)->mode_valid(encoder, mode);
        case DCB_OUTPUT_DP:
-               max_clock  = nv_encoder->dp.link_nr;
-               max_clock *= nv_encoder->dp.link_bw;
-               clock = clock * (connector->display_info.bpc * 3) / 10;
-               break;
+               return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
        default:
                BUG();
                return MODE_BAD;
        }
 
-       if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
-               clock *= 2;
-
-       if (clock < min_clock)
-               return MODE_CLOCK_LOW;
-
-       if (clock > max_clock)
-               return MODE_CLOCK_HIGH;
-
-       return MODE_OK;
+       return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+                                            NULL);
 }
 
 static struct drm_encoder *
index de84fb4708c7ab22f4e5abbf1aa5918500b3bc63..9e062c7adec8feb9a5e5291b5dc875e7c9b3a22f 100644 (file)
@@ -195,6 +195,11 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
                                     const struct drm_connector_state *,
                                     struct drm_property *, u64 *);
 struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *,
+                             const unsigned min_clock,
+                             const unsigned max_clock,
+                             unsigned *clock);
 
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
 extern int nouveau_backlight_init(struct drm_connector *);
index 15a3d40edf0292ca16797a482b0b16d73573ff21..63b5c8cf9ae4304c4e477d3619630912ec7c8324 100644 (file)
@@ -181,8 +181,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
        }
 
        ret = pm_runtime_get_sync(drm->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(drm->dev);
                return ret;
+       }
+
        ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
        pm_runtime_put_autosuspend(drm->dev);
        if (ret < 0)
@@ -217,7 +220,7 @@ static const struct nouveau_debugfs_files {
        {"pstate", &nouveau_pstate_fops},
 };
 
-int
+void
 nouveau_drm_debugfs_init(struct drm_minor *minor)
 {
        struct nouveau_drm *drm = nouveau_drm(minor->dev);
@@ -240,12 +243,10 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
         */
        dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
        if (!dentry)
-               return 0;
+               return;
 
        d_inode(dentry)->i_size = drm->vbios.length;
        dput(dentry);
-
-       return 0;
 }
 
 int
index 8909c010e8eaebfe4fe4eb08737a61e01e98be11..77f0323b38ba8cb7b20822a3b98866509e60c46f 100644 (file)
@@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev)
        return nouveau_drm(dev)->debugfs;
 }
 
-extern int  nouveau_drm_debugfs_init(struct drm_minor *);
+extern void  nouveau_drm_debugfs_init(struct drm_minor *);
 extern int  nouveau_debugfs_init(struct nouveau_drm *);
 extern void nouveau_debugfs_fini(struct nouveau_drm *);
 #else
-static inline int
+static inline void
 nouveau_drm_debugfs_init(struct drm_minor *minor)
-{
-       return 0;
-}
+{}
 
 static inline int
 nouveau_debugfs_init(struct nouveau_drm *drm)
index 700817dc4fa04a74bb1ad8d8e0fc848037c0693f..496c4621cc787dd69028a4d89705f202b13dca48 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -179,41 +180,164 @@ nouveau_display_vblank_init(struct drm_device *dev)
        return 0;
 }
 
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+       .destroy = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+};
+
 static void
-nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+nouveau_decode_mod(struct nouveau_drm *drm,
+                  uint64_t modifier,
+                  uint32_t *tile_mode,
+                  uint8_t *kind)
+{
+       BUG_ON(!tile_mode || !kind);
+
+       if (modifier == DRM_FORMAT_MOD_LINEAR) {
+               /* tile_mode will not be used in this case */
+               *tile_mode = 0;
+               *kind = 0;
+       } else {
+               /*
+                * Extract the block height and kind from the corresponding
+                * modifier fields.  See drm_fourcc.h for details.
+                */
+               *tile_mode = (uint32_t)(modifier & 0xF);
+               *kind = (uint8_t)((modifier >> 12) & 0xFF);
+
+               if (drm->client.device.info.chipset >= 0xc0)
+                       *tile_mode <<= 4;
+       }
+}
+
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
+                              uint32_t *tile_mode,
+                              uint8_t *kind)
 {
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       if (fb->flags & DRM_MODE_FB_MODIFIERS) {
+               struct nouveau_drm *drm = nouveau_drm(fb->dev);
 
-       if (fb->nvbo)
-               drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
+               nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
+       } else {
+               const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
 
-       drm_framebuffer_cleanup(drm_fb);
-       kfree(fb);
+               *tile_mode = nvbo->mode;
+               *kind = nvbo->kind;
+       }
 }
 
 static int
-nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
-                                      struct drm_file *file_priv,
-                                      unsigned int *handle)
+nouveau_validate_decode_mod(struct nouveau_drm *drm,
+                           uint64_t modifier,
+                           uint32_t *tile_mode,
+                           uint8_t *kind)
 {
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       struct nouveau_display *disp = nouveau_display(drm->dev);
+       int mod;
+
+       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+               return -EINVAL;
+       }
 
-       return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
+       BUG_ON(!disp->format_modifiers);
+
+       for (mod = 0;
+            (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+            (disp->format_modifiers[mod] != modifier);
+            mod++);
+
+       if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+               return -EINVAL;
+
+       nouveau_decode_mod(drm, modifier, tile_mode, kind);
+
+       return 0;
 }
 
-static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
-       .destroy = nouveau_user_framebuffer_destroy,
-       .create_handle = nouveau_user_framebuffer_create_handle,
-};
+static inline uint32_t
+nouveau_get_width_in_blocks(uint32_t stride)
+{
+       /* GOBs per block in the x direction is always one, and GOBs are
+        * 64 bytes wide
+        */
+       static const uint32_t log_block_width = 6;
+
+       return (stride + (1 << log_block_width) - 1) >> log_block_width;
+}
+
+static inline uint32_t
+nouveau_get_height_in_blocks(struct nouveau_drm *drm,
+                            uint32_t height,
+                            uint32_t log_block_height_in_gobs)
+{
+       uint32_t log_gob_height;
+       uint32_t log_block_height;
+
+       BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+               log_gob_height = 2;
+       else
+               log_gob_height = 3;
+
+       log_block_height = log_block_height_in_gobs + log_gob_height;
+
+       return (height + (1 << log_block_height) - 1) >> log_block_height;
+}
+
+static int
+nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
+                     uint32_t offset, uint32_t stride, uint32_t h,
+                     uint32_t tile_mode)
+{
+       uint32_t gob_size, bw, bh;
+       uint64_t bl_size;
+
+       BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+       if (drm->client.device.info.chipset >= 0xc0) {
+               if (tile_mode & 0xF)
+                       return -EINVAL;
+               tile_mode >>= 4;
+       }
+
+       if (tile_mode & 0xFFFFFFF0)
+               return -EINVAL;
+
+       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+               gob_size = 256;
+       else
+               gob_size = 512;
+
+       bw = nouveau_get_width_in_blocks(stride);
+       bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+
+       bl_size = bw * bh * (1 << tile_mode) * gob_size;
+
+       DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+                     offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
+                     nvbo->bo.mem.size);
+
+       if (bl_size + offset > nvbo->bo.mem.size)
+               return -ERANGE;
+
+       return 0;
+}
 
 int
 nouveau_framebuffer_new(struct drm_device *dev,
                        const struct drm_mode_fb_cmd2 *mode_cmd,
-                       struct nouveau_bo *nvbo,
-                       struct nouveau_framebuffer **pfb)
+                       struct drm_gem_object *gem,
+                       struct drm_framebuffer **pfb)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_framebuffer *fb;
+       struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct drm_framebuffer *fb;
+       const struct drm_format_info *info;
+       unsigned int width, height, i;
+       uint32_t tile_mode;
+       uint8_t kind;
        int ret;
 
         /* YUV overlays have special requirements pre-NV50 */
@@ -236,13 +360,50 @@ nouveau_framebuffer_new(struct drm_device *dev,
                return -EINVAL;
        }
 
+       if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+               if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
+                                               &tile_mode, &kind)) {
+                       DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
+                                     mode_cmd->modifier[0]);
+                       return -EINVAL;
+               }
+       } else {
+               tile_mode = nvbo->mode;
+               kind = nvbo->kind;
+       }
+
+       info = drm_get_format_info(dev, mode_cmd);
+
+       for (i = 0; i < info->num_planes; i++) {
+               width = drm_format_info_plane_width(info,
+                                                   mode_cmd->width,
+                                                   i);
+               height = drm_format_info_plane_height(info,
+                                                     mode_cmd->height,
+                                                     i);
+
+               if (kind) {
+                       ret = nouveau_check_bl_size(drm, nvbo,
+                                                   mode_cmd->offsets[i],
+                                                   mode_cmd->pitches[i],
+                                                   height, tile_mode);
+                       if (ret)
+                               return ret;
+               } else {
+                       uint32_t size = mode_cmd->pitches[i] * height;
+
+                       if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+                               return -ERANGE;
+               }
+       }
+
        if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
                return -ENOMEM;
 
-       drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
-       fb->nvbo = nvbo;
+       drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+       fb->obj[0] = gem;
 
-       ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+       ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
        if (ret)
                kfree(fb);
        return ret;
@@ -253,19 +414,17 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
                                struct drm_file *file_priv,
                                const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-       struct nouveau_framebuffer *fb;
-       struct nouveau_bo *nvbo;
+       struct drm_framebuffer *fb;
        struct drm_gem_object *gem;
        int ret;
 
        gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
        if (!gem)
                return ERR_PTR(-ENOENT);
-       nvbo = nouveau_gem_object(gem);
 
-       ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+       ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
        if (ret == 0)
-               return &fb->base;
+               return fb;
 
        drm_gem_object_put_unlocked(gem);
        return ERR_PTR(ret);
@@ -517,6 +676,7 @@ nouveau_display_create(struct drm_device *dev)
 
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;
+       dev->mode_config.allow_fb_modifiers = true;
 
        if (drm->client.device.info.chipset < 0x11)
                dev->mode_config.async_page_flip = false;
index de004018ab5c0ecbd26632386194ca11cdc081ba..6e0d900441d69bf176939dd7acad17d3fad082b3 100644 (file)
@@ -8,26 +8,11 @@
 
 #include <drm/drm_framebuffer.h>
 
-struct nouveau_framebuffer {
-       struct drm_framebuffer base;
-       struct nouveau_bo *nvbo;
-       struct nouveau_vma *vma;
-       u32 r_handle;
-       u32 r_format;
-       u32 r_pitch;
-       struct nvif_object h_base[4];
-       struct nvif_object h_core;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
-       return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_new(struct drm_device *,
-                           const struct drm_mode_fb_cmd2 *,
-                           struct nouveau_bo *, struct nouveau_framebuffer **);
+int
+nouveau_framebuffer_new(struct drm_device *dev,
+                       const struct drm_mode_fb_cmd2 *mode_cmd,
+                       struct drm_gem_object *gem,
+                       struct drm_framebuffer **pfb);
 
 struct nouveau_display {
        void *priv;
@@ -47,6 +32,8 @@ struct nouveau_display {
        struct drm_property *color_vibrance_property;
 
        struct drm_atomic_state *suspend;
+
+       const u64 *format_modifiers;
 };
 
 static inline struct nouveau_display *
@@ -75,6 +62,10 @@ int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
 
 void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
 
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
+                              uint8_t *kind);
+
 struct drm_framebuffer *
 nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
                                const struct drm_mode_fb_cmd2 *);
index 3364904eccff5a4bda4c1fbaea8f5b8e4923e98b..e5c230d9ae24ed2ccca95c4d25c6765ef26beca0 100644 (file)
 #include "nouveau_dma.h"
 #include "nouveau_mem.h"
 #include "nouveau_bo.h"
+#include "nouveau_svm.h"
 
 #include <nvif/class.h>
 #include <nvif/object.h>
 #include <nvif/if000c.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
+#include <nvif/if000c.h>
 
 #include <linux/sched/mm.h>
 #include <linux/hmm.h>
@@ -54,66 +56,69 @@ enum nouveau_aper {
 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
+typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
+                                     enum nouveau_aper, u64 dst_addr);
 
 struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
-       unsigned long pfn_first;
        unsigned long callocated;
-       unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
-       spinlock_t lock;
+       struct dev_pagemap pagemap;
 };
 
 struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
+       nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
 };
 
 struct nouveau_dmem {
        struct nouveau_drm *drm;
-       struct dev_pagemap pagemap;
        struct nouveau_dmem_migrate migrate;
-       struct list_head chunk_free;
-       struct list_head chunk_full;
-       struct list_head chunk_empty;
+       struct list_head chunks;
        struct mutex mutex;
+       struct page *free_pages;
+       spinlock_t lock;
 };
 
-static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
+{
+       return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+}
+
+static struct nouveau_drm *page_to_drm(struct page *page)
 {
-       return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+       struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+
+       return chunk->drm;
 }
 
 unsigned long nouveau_dmem_page_addr(struct page *page)
 {
-       struct nouveau_dmem_chunk *chunk = page->zone_device_data;
-       unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+       struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+       unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
+                               chunk->pagemap.res.start;
 
-       return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+       return chunk->bo->bo.offset + off;
 }
 
 static void nouveau_dmem_page_free(struct page *page)
 {
-       struct nouveau_dmem_chunk *chunk = page->zone_device_data;
-       unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+       struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+       struct nouveau_dmem *dmem = chunk->drm->dmem;
+
+       spin_lock(&dmem->lock);
+       page->zone_device_data = dmem->free_pages;
+       dmem->free_pages = page;
 
-       /*
-        * FIXME:
-        *
-        * This is really a bad example, we need to overhaul nouveau memory
-        * management to be more page focus and allow lighter locking scheme
-        * to be use in the process.
-        */
-       spin_lock(&chunk->lock);
-       clear_bit(idx, chunk->bitmap);
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reach 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
-       spin_unlock(&chunk->lock);
+       spin_unlock(&dmem->lock);
 }
 
 static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
@@ -165,8 +170,8 @@ error_free_page:
 
 static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 {
-       struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
-       struct nouveau_drm *drm = dmem->drm;
+       struct nouveau_drm *drm = page_to_drm(vmf->page);
+       struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
@@ -209,131 +214,105 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
 };
 
 static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 {
        struct nouveau_dmem_chunk *chunk;
+       struct resource *res;
+       struct page *page;
+       void *ptr;
+       unsigned long i, pfn_first;
        int ret;
 
-       if (drm->dmem == NULL)
-               return -EINVAL;
-
-       mutex_lock(&drm->dmem->mutex);
-       chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
-                                        struct nouveau_dmem_chunk,
-                                        list);
+       chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
-               mutex_unlock(&drm->dmem->mutex);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out;
        }
 
-       list_del(&chunk->list);
-       mutex_unlock(&drm->dmem->mutex);
+       /* Allocate unused physical address space for device private pages. */
+       res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+                                     "nouveau_dmem");
+       if (IS_ERR(res)) {
+               ret = PTR_ERR(res);
+               goto out_free;
+       }
+
+       chunk->drm = drm;
+       chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
+       chunk->pagemap.res = *res;
+       chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
+       chunk->pagemap.owner = drm->dev;
 
        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
-               goto out;
+               goto out_release;
 
        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
-       if (ret) {
-               nouveau_bo_ref(NULL, &chunk->bo);
-               goto out;
-       }
+       if (ret)
+               goto out_bo_free;
 
-       bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
-       spin_lock_init(&chunk->lock);
+       ptr = memremap_pages(&chunk->pagemap, numa_node_id());
+       if (IS_ERR(ptr)) {
+               ret = PTR_ERR(ptr);
+               goto out_bo_unpin;
+       }
 
-out:
        mutex_lock(&drm->dmem->mutex);
-       if (chunk->bo)
-               list_add(&chunk->list, &drm->dmem->chunk_empty);
-       else
-               list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+       list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);
 
-       return ret;
-}
-
-static struct nouveau_dmem_chunk *
-nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
-{
-       struct nouveau_dmem_chunk *chunk;
-
-       chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
-                                        struct nouveau_dmem_chunk,
-                                        list);
-       if (chunk)
-               return chunk;
-
-       chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
-                                        struct nouveau_dmem_chunk,
-                                        list);
-       if (chunk->bo)
-               return chunk;
-
-       return NULL;
-}
-
-static int
-nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
-                        unsigned long npages,
-                        unsigned long *pages)
-{
-       struct nouveau_dmem_chunk *chunk;
-       unsigned long c;
-       int ret;
-
-       memset(pages, 0xff, npages * sizeof(*pages));
-
-       mutex_lock(&drm->dmem->mutex);
-       for (c = 0; c < npages;) {
-               unsigned long i;
-
-               chunk = nouveau_dmem_chunk_first_free_locked(drm);
-               if (chunk == NULL) {
-                       mutex_unlock(&drm->dmem->mutex);
-                       ret = nouveau_dmem_chunk_alloc(drm);
-                       if (ret) {
-                               if (c)
-                                       return 0;
-                               return ret;
-                       }
-                       mutex_lock(&drm->dmem->mutex);
-                       continue;
-               }
-
-               spin_lock(&chunk->lock);
-               i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
-               while (i < DMEM_CHUNK_NPAGES && c < npages) {
-                       pages[c] = chunk->pfn_first + i;
-                       set_bit(i, chunk->bitmap);
-                       chunk->callocated++;
-                       c++;
-
-                       i = find_next_zero_bit(chunk->bitmap,
-                                       DMEM_CHUNK_NPAGES, i);
-               }
-               spin_unlock(&chunk->lock);
+       pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+       page = pfn_to_page(pfn_first);
+       spin_lock(&drm->dmem->lock);
+       for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
+               page->zone_device_data = drm->dmem->free_pages;
+               drm->dmem->free_pages = page;
        }
-       mutex_unlock(&drm->dmem->mutex);
+       *ppage = page;
+       chunk->callocated++;
+       spin_unlock(&drm->dmem->lock);
+
+       NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
+               DMEM_CHUNK_SIZE >> 20);
 
        return 0;
+
+out_bo_unpin:
+       nouveau_bo_unpin(chunk->bo);
+out_bo_free:
+       nouveau_bo_ref(NULL, &chunk->bo);
+out_release:
+       release_mem_region(chunk->pagemap.res.start,
+                          resource_size(&chunk->pagemap.res));
+out_free:
+       kfree(chunk);
+out:
+       return ret;
 }
 
 static struct page *
 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
 {
-       unsigned long pfns[1];
-       struct page *page;
+       struct nouveau_dmem_chunk *chunk;
+       struct page *page = NULL;
        int ret;
 
-       /* FIXME stop all the miss-match API ... */
-       ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
-       if (ret)
-               return NULL;
+       spin_lock(&drm->dmem->lock);
+       if (drm->dmem->free_pages) {
+               page = drm->dmem->free_pages;
+               drm->dmem->free_pages = page->zone_device_data;
+               chunk = nouveau_page_to_chunk(page);
+               chunk->callocated++;
+               spin_unlock(&drm->dmem->lock);
+       } else {
+               spin_unlock(&drm->dmem->lock);
+               ret = nouveau_dmem_chunk_alloc(drm, &page);
+               if (ret)
+                       return NULL;
+       }
 
-       page = pfn_to_page(pfns[0]);
        get_page(page);
        lock_page(page);
        return page;
@@ -356,12 +335,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
                return;
 
        mutex_lock(&drm->dmem->mutex);
-       list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
-               ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
-               /* FIXME handle pin failure */
-               WARN_ON(ret);
-       }
-       list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+       list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
@@ -378,12 +352,8 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
                return;
 
        mutex_lock(&drm->dmem->mutex);
-       list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
-               nouveau_bo_unpin(chunk->bo);
-       }
-       list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+       list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
-       }
        mutex_unlock(&drm->dmem->mutex);
 }
 
@@ -397,15 +367,13 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
 
        mutex_lock(&drm->dmem->mutex);
 
-       WARN_ON(!list_empty(&drm->dmem->chunk_free));
-       WARN_ON(!list_empty(&drm->dmem->chunk_full));
-
-       list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
-               if (chunk->bo) {
-                       nouveau_bo_unpin(chunk->bo);
-                       nouveau_bo_ref(NULL, &chunk->bo);
-               }
+       list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+               nouveau_bo_unpin(chunk->bo);
+               nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
+               memunmap_pages(&chunk->pagemap);
+               release_mem_region(chunk->pagemap.res.start,
+                                  resource_size(&chunk->pagemap.res));
                kfree(chunk);
        }
 
@@ -471,6 +439,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
        return 0;
 }
 
+static int
+nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
+                    enum nouveau_aper dst_aper, u64 dst_addr)
+{
+       struct nouveau_channel *chan = drm->dmem->migrate.chan;
+       u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
+                        (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+                        (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+                        (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+                        (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+       u32 remap = (4 <<  0) /* DST_X_CONST_A */ |
+                   (5 <<  4) /* DST_Y_CONST_B */ |
+                   (3 << 16) /* COMPONENT_SIZE_FOUR */ |
+                   (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+       int ret;
+
+       ret = RING_SPACE(chan, 12);
+       if (ret)
+               return ret;
+
+       switch (dst_aper) {
+       case NOUVEAU_APER_VRAM:
+               BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+                       break;
+       case NOUVEAU_APER_HOST:
+               BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+               break;
+       default:
+               return -EINVAL;
+       }
+       launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+       BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
+       OUT_RING(chan, 0);
+       OUT_RING(chan, 0);
+       OUT_RING(chan, remap);
+       BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
+       OUT_RING(chan, upper_32_bits(dst_addr));
+       OUT_RING(chan, lower_32_bits(dst_addr));
+       BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
+       OUT_RING(chan, length >> 3);
+       BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+       OUT_RING(chan, launch_dma);
+       return 0;
+}
+
 static int
 nouveau_dmem_migrate_init(struct nouveau_drm *drm)
 {
@@ -480,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
        case  VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+               drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
@@ -491,9 +506,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
 void
 nouveau_dmem_init(struct nouveau_drm *drm)
 {
-       struct device *device = drm->dev->dev;
-       struct resource *res;
-       unsigned long i, size, pfn_first;
        int ret;
 
        /* This only make sense on PASCAL or newer */
@@ -505,84 +517,53 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
-       INIT_LIST_HEAD(&drm->dmem->chunk_free);
-       INIT_LIST_HEAD(&drm->dmem->chunk_full);
-       INIT_LIST_HEAD(&drm->dmem->chunk_empty);
-
-       size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+       INIT_LIST_HEAD(&drm->dmem->chunks);
+       mutex_init(&drm->dmem->mutex);
+       spin_lock_init(&drm->dmem->lock);
 
        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
-       if (ret)
-               goto out_free;
-
-       /*
-        * FIXME we need some kind of policy to decide how much VRAM we
-        * want to register with HMM. For now just register everything
-        * and latter if we want to do thing like over commit then we
-        * could revisit this.
-        */
-       res = devm_request_free_mem_region(device, &iomem_resource, size);
-       if (IS_ERR(res))
-               goto out_free;
-       drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-       drm->dmem->pagemap.res = *res;
-       drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
-       drm->dmem->pagemap.owner = drm->dev;
-       if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
-               goto out_free;
-
-       pfn_first = res->start >> PAGE_SHIFT;
-       for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
-               struct nouveau_dmem_chunk *chunk;
-               struct page *page;
-               unsigned long j;
-
-               chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
-               if (chunk == NULL) {
-                       nouveau_dmem_fini(drm);
-                       return;
-               }
-
-               chunk->drm = drm;
-               chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
-               list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
-
-               page = pfn_to_page(chunk->pfn_first);
-               for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
-                       page->zone_device_data = chunk;
+       if (ret) {
+               kfree(drm->dmem);
+               drm->dmem = NULL;
        }
-
-       NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
-       return;
-out_free:
-       kfree(drm->dmem);
-       drm->dmem = NULL;
 }
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
-               unsigned long src, dma_addr_t *dma_addr)
+               unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
 {
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
+       unsigned long paddr;
 
        spage = migrate_pfn_to_page(src);
-       if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+       if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;
 
        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
-               return 0;
-
-       *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(dev, *dma_addr))
-               goto out_free_page;
+               goto out;
 
-       if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-                       nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
-                       *dma_addr))
-               goto out_dma_unmap;
+       paddr = nouveau_dmem_page_addr(dpage);
+       if (spage) {
+               *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+                                        DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(dev, *dma_addr))
+                       goto out_free_page;
+               if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+                       NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+                       goto out_dma_unmap;
+       } else {
+               *dma_addr = DMA_MAPPING_ERROR;
+               if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+                       NOUVEAU_APER_VRAM, paddr))
+                       goto out_free_page;
+       }
 
+       *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+               ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+       if (src & MIGRATE_PFN_WRITE)
+               *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 
 out_dma_unmap:
@@ -590,19 +571,21 @@ out_dma_unmap:
 out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
 out:
+       *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
 }
 
 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
-               struct migrate_vma *args, dma_addr_t *dma_addrs)
+               struct nouveau_svmm *svmm, struct migrate_vma *args,
+               dma_addr_t *dma_addrs, u64 *pfns)
 {
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;
 
        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
-                               dma_addrs + nr_dma);
-               if (args->dst[i])
+                               dma_addrs + nr_dma, pfns + i);
+               if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }
@@ -610,20 +593,18 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
+       nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
 
        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
-       /*
-        * FIXME optimization: update GPU page table to point to newly migrated
-        * memory.
-        */
        migrate_vma_finalize(args);
 }
 
 int
 nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+                        struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
@@ -635,9 +616,13 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                .vma            = vma,
                .start          = start,
        };
-       unsigned long c, i;
+       unsigned long i;
+       u64 *pfns;
        int ret = -ENOMEM;
 
+       if (drm->dmem == NULL)
+               return -ENODEV;
+
        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
@@ -649,19 +634,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
        if (!dma_addrs)
                goto out_free_dst;
 
-       for (i = 0; i < npages; i += c) {
-               c = min(SG_MAX_SINGLE_ALLOC, npages);
-               args.end = start + (c << PAGE_SHIFT);
+       pfns = nouveau_pfns_alloc(max);
+       if (!pfns)
+               goto out_free_dma;
+
+       for (i = 0; i < npages; i += max) {
+               args.end = start + (max << PAGE_SHIFT);
                ret = migrate_vma_setup(&args);
                if (ret)
-                       goto out_free_dma;
+                       goto out_free_pfns;
 
                if (args.cpages)
-                       nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
+                       nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+                                                  pfns);
                args.start = args.end;
        }
 
        ret = 0;
+out_free_pfns:
+       nouveau_pfns_free(pfns);
 out_free_dma:
        kfree(dma_addrs);
 out_free_dst:
index db3b59b210af13f799a94e001df725279fdce70b..64da5d3635c8e540a5d1bb0ad7935f9a8b413542 100644 (file)
@@ -25,6 +25,7 @@
 struct drm_device;
 struct drm_file;
 struct nouveau_drm;
+struct nouveau_svmm;
 struct hmm_range;
 
 #if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
@@ -34,6 +35,7 @@ void nouveau_dmem_suspend(struct nouveau_drm *);
 void nouveau_dmem_resume(struct nouveau_drm *);
 
 int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+                            struct nouveau_svmm *svmm,
                             struct vm_area_struct *vma,
                             unsigned long start,
                             unsigned long end);
index 2674f1587457a0fa8c053fc9cb139ef8e7453a15..8a0f7994e1aeb623c17375b8d2ecbca9ac4915a4 100644 (file)
@@ -98,3 +98,34 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
                return NOUVEAU_DP_SST;
        return ret;
 }
+
+/* TODO:
+ * - Use the minimum possible BPC here, once we add support for the max bpc
+ *   property.
+ * - Validate the mode against downstream port caps (see
+ *   drm_dp_downstream_max_clock())
+ * - Validate against the DP caps advertised by the GPU (we don't check these
+ *   yet)
+ */
+enum drm_mode_status
+nv50_dp_mode_valid(struct drm_connector *connector,
+                  struct nouveau_encoder *outp,
+                  const struct drm_display_mode *mode,
+                  unsigned *out_clock)
+{
+       const unsigned min_clock = 25000;
+       unsigned max_clock, clock;
+       enum drm_mode_status ret;
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+               return MODE_NO_INTERLACE;
+
+       max_clock = outp->dp.link_nr * outp->dp.link_bw;
+       clock = mode->clock * (connector->display_info.bpc * 3) / 10;
+
+       ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+                                           &clock);
+       if (out_clock)
+               *out_clock = clock;
+       return ret;
+}
index ca4087f5a15b6c043ddc5245c722dc32049fda3c..ac93d12201dc03439af423d83093c6b8a728da2d 100644 (file)
@@ -681,8 +681,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 {
        struct nvkm_device *device;
        struct drm_device *drm_dev;
-       struct apertures_struct *aper;
-       bool boot = false;
        int ret;
 
        if (vga_switcheroo_client_probe_defer(pdev))
@@ -699,32 +697,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        nvkm_device_del(&device);
 
        /* Remove conflicting drivers (vesafb, efifb etc). */
-       aper = alloc_apertures(3);
-       if (!aper)
-               return -ENOMEM;
-
-       aper->ranges[0].base = pci_resource_start(pdev, 1);
-       aper->ranges[0].size = pci_resource_len(pdev, 1);
-       aper->count = 1;
-
-       if (pci_resource_len(pdev, 2)) {
-               aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
-               aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
-               aper->count++;
-       }
-
-       if (pci_resource_len(pdev, 3)) {
-               aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
-               aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
-               aper->count++;
-       }
-
-#ifdef CONFIG_X86
-       boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
-       if (nouveau_modeset != 2)
-               drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
-       kfree(aper);
+       ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+       if (ret)
+               return ret;
 
        ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
                                  true, true, ~0ULL, &device);
index 3517f920bf8932952b7cf63e5feebabf0e1f77e6..de51733b04761586f7e737fb0426e8192d01bdfa 100644 (file)
@@ -66,6 +66,10 @@ struct nouveau_encoder {
                } dp;
        };
 
+       struct {
+               bool dp_interlace : 1;
+       } caps;
+
        void (*enc_save)(struct drm_encoder *encoder);
        void (*enc_restore)(struct drm_encoder *encoder);
        void (*update)(struct nouveau_encoder *, u8 head,
@@ -100,6 +104,10 @@ enum nouveau_dp_status {
 };
 
 int nouveau_dp_detect(struct nouveau_encoder *);
+enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
+                                       struct nouveau_encoder *,
+                                       const struct drm_display_mode *,
+                                       unsigned *clock);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
index 24d543a01f435ae2bfa9861c78edc7339a2704f5..3d11b84d4cf9f8a1e333a1f90efd44d98543525e 100644 (file)
@@ -312,7 +312,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->client.device;
        struct fb_info *info;
-       struct nouveau_framebuffer *fb;
+       struct drm_framebuffer *fb;
        struct nouveau_channel *chan;
        struct nouveau_bo *nvbo;
        struct drm_mode_fb_cmd2 mode_cmd;
@@ -335,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+       ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
        if (ret)
                goto out_unref;
 
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
        chan = nouveau_nofbaccel ? NULL : drm->channel;
        if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
+               ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
                if (ret) {
                        NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
                        chan = NULL;
@@ -367,7 +367,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
        }
 
        /* setup helper */
-       fbcon->helper.fb = &fb->base;
+       fbcon->helper.fb = fb;
 
        if (!chan)
                info->flags = FBINFO_HWACCEL_DISABLED;
@@ -376,12 +376,12 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
                              FBINFO_HWACCEL_FILLRECT |
                              FBINFO_HWACCEL_IMAGEBLIT;
        info->fbops = &nouveau_fbcon_sw_ops;
-       info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
-                              fb->nvbo->bo.mem.bus.offset;
-       info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+       info->fix.smem_start = nvbo->bo.mem.bus.base +
+                              nvbo->bo.mem.bus.offset;
+       info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 
-       info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
-       info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+       info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+       info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 
        drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
 
@@ -393,19 +393,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
        /* To allow resizeing without swapping buffers */
        NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
-               fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
+               fb->width, fb->height, nvbo->bo.offset, nvbo);
 
        vga_switcheroo_client_fb_set(dev->pdev, info);
        return 0;
 
 out_unlock:
        if (chan)
-               nouveau_vma_del(&fb->vma);
-       nouveau_bo_unmap(fb->nvbo);
+               nouveau_vma_del(&fbcon->vma);
+       nouveau_bo_unmap(nvbo);
 out_unpin:
-       nouveau_bo_unpin(fb->nvbo);
+       nouveau_bo_unpin(nvbo);
 out_unref:
-       nouveau_bo_ref(NULL, &fb->nvbo);
+       nouveau_bo_ref(NULL, &nvbo);
 out:
        return ret;
 }
@@ -413,16 +413,18 @@ out:
 static int
 nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
-       struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
+       struct drm_framebuffer *fb = fbcon->helper.fb;
+       struct nouveau_bo *nvbo;
 
        drm_fb_helper_unregister_fbi(&fbcon->helper);
        drm_fb_helper_fini(&fbcon->helper);
 
-       if (nouveau_fb && nouveau_fb->nvbo) {
-               nouveau_vma_del(&nouveau_fb->vma);
-               nouveau_bo_unmap(nouveau_fb->nvbo);
-               nouveau_bo_unpin(nouveau_fb->nvbo);
-               drm_framebuffer_put(&nouveau_fb->base);
+       if (fb && fb->obj[0]) {
+               nvbo = nouveau_gem_object(fb->obj[0]);
+               nouveau_vma_del(&fbcon->vma);
+               nouveau_bo_unmap(nvbo);
+               nouveau_bo_unpin(nvbo);
+               drm_framebuffer_put(fb);
        }
 
        return 0;
index 73a7eeba39738ee249f7baca95d26f023d34a31a..1796d8824580b14368f4b3a991241cea2700594d 100644 (file)
@@ -31,6 +31,8 @@
 
 #include "nouveau_display.h"
 
+struct nouveau_vma;
+
 struct nouveau_fbdev {
        struct drm_fb_helper helper; /* must be first */
        unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
        struct nvif_object gdi;
        struct nvif_object blit;
        struct nvif_object twod;
+       struct nouveau_vma *vma;
 
        struct mutex hotplug_lock;
        bool hotplug_waiting;
index f5ece1f9497348a892ca8a7ddc97354df409d60b..4c3f131ad31da368b200ea7c42e72e30f36835f4 100644 (file)
@@ -76,8 +76,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
                return ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(dev);
                goto out;
+       }
 
        ret = nouveau_vma_new(nvbo, vmm, &vma);
        pm_runtime_mark_last_busy(dev);
@@ -157,8 +159,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
-                               pm_runtime_put_autosuspend(dev);
                        }
+                       pm_runtime_put_autosuspend(dev);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
index 039e23548e08f4074c915160a7a43d89298367ec..23cd43a7fd19a97e4b9638cf6b8f8d90dca94032 100644 (file)
@@ -95,14 +95,3 @@ struct platform_driver nouveau_platform_driver = {
        .probe = nouveau_platform_probe,
        .remove = nouveau_platform_remove,
 };
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
-MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
-#endif
index 407e34a5c0abf834eefaf798bd61600aab0d83a6..22f054f7ee3e405a3d00f87a924b98d0e9d390fe 100644 (file)
@@ -70,6 +70,12 @@ struct nouveau_svm {
 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
 
+struct nouveau_pfnmap_args {
+       struct nvif_ioctl_v0 i;
+       struct nvif_ioctl_mthd_v0 m;
+       struct nvif_vmm_pfnmap_v0 p;
+};
+
 struct nouveau_ivmm {
        struct nouveau_svmm *svmm;
        u64 inst;
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
                addr = max(addr, vma->vm_start);
                next = min(vma->vm_end, end);
                /* This is a best effort so we ignore errors */
-               nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+               nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
+                                        next);
                addr = next;
        }
 
@@ -814,6 +821,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
        return NVIF_NOTIFY_KEEP;
 }
 
+static struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+       return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+       struct nouveau_pfnmap_args *args;
+
+       args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
+       if (!args)
+               return NULL;
+
+       args->i.type = NVIF_IOCTL_V0_MTHD;
+       args->m.method = NVIF_VMM_V0_PFNMAP;
+       args->p.page = PAGE_SHIFT;
+
+       return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+       struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+       kfree(args);
+}
+
+void
+nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+                unsigned long addr, u64 *pfns, unsigned long npages)
+{
+       struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+       int ret;
+
+       args->p.addr = addr;
+       args->p.size = npages << PAGE_SHIFT;
+
+       mutex_lock(&svmm->mutex);
+
+       svmm->vmm->vmm.object.client->super = true;
+       ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+                               npages * sizeof(args->p.phys[0]), NULL);
+       svmm->vmm->vmm.object.client->super = false;
+
+       mutex_unlock(&svmm->mutex);
+}
+
 static void
 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
 {
index e839d81894611cb4b0bec4eef6fd6944e1db3b6d..f0fcd1b72e8bb2919a9694f541ac740a4c46e333 100644 (file)
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
 int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
 void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
 int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+                     unsigned long addr, u64 *pfns, unsigned long npages);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
 static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
index facd18564e0d87b1fea344875dee18117b09e499..47428f79ede8f1915629d7ecdfd2e86227094afe 100644 (file)
@@ -149,7 +149,6 @@ int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
        struct nouveau_fbdev *nfbdev = info->par;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
        struct drm_device *dev = nfbdev->helper.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->fix.line_length);
        OUT_RING(chan, info->var.xres_virtual);
        OUT_RING(chan, info->var.yres_virtual);
-       OUT_RING(chan, upper_32_bits(fb->vma->addr));
-       OUT_RING(chan, lower_32_bits(fb->vma->addr));
+       OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+       OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
        BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
        OUT_RING(chan, format);
        OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->fix.line_length);
        OUT_RING(chan, info->var.xres_virtual);
        OUT_RING(chan, info->var.yres_virtual);
-       OUT_RING(chan, upper_32_bits(fb->vma->addr));
-       OUT_RING(chan, lower_32_bits(fb->vma->addr));
+       OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+       OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
        FIRE_RING(chan);
 
        return 0;
index c0deef4fe7274ff5b96ef36aaba8156e161b106c..cb56163ed6082484713613ba1b751c2605697ed1 100644 (file)
@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 {
        struct nouveau_fbdev *nfbdev = info->par;
        struct drm_device *dev = nfbdev->helper.dev;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_channel *chan = drm->channel;
        int ret, format;
@@ -240,8 +239,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
        OUT_RING  (chan, info->fix.line_length);
        OUT_RING  (chan, info->var.xres_virtual);
        OUT_RING  (chan, info->var.yres_virtual);
-       OUT_RING  (chan, upper_32_bits(fb->vma->addr));
-       OUT_RING  (chan, lower_32_bits(fb->vma->addr));
+       OUT_RING  (chan, upper_32_bits(nfbdev->vma->addr));
+       OUT_RING  (chan, lower_32_bits(nfbdev->vma->addr));
        BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
        OUT_RING  (chan, format);
        OUT_RING  (chan, 1);
@@ -251,8 +250,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
        OUT_RING  (chan, info->fix.line_length);
        OUT_RING  (chan, info->var.xres_virtual);
        OUT_RING  (chan, info->var.yres_virtual);
-       OUT_RING  (chan, upper_32_bits(fb->vma->addr));
-       OUT_RING  (chan, lower_32_bits(fb->vma->addr));
+       OUT_RING  (chan, upper_32_bits(nfbdev->vma->addr));
+       OUT_RING  (chan, lower_32_bits(nfbdev->vma->addr));
        FIRE_RING (chan);
 
        return 0;
index 4cc186262d3441fa051b8be0514ee453eb568d7b..38130ef272d6f1023458f18cdf122fe1b16f3329 100644 (file)
@@ -140,7 +140,7 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
 {
        struct nvkm_instmem *imem = device->imem;
        struct nvkm_memory *memory;
-       int ret = -ENOSYS;
+       int ret;
 
        if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
                return -ENOSYS;
index 79a8f9d305c58d19c69a295cb37cb090ebf3179b..49d468b45d3f4def458e7d1e9051e79434e72340 100644 (file)
@@ -221,3 +221,14 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
        __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
        subdev->debug = nvkm_dbgopt(device->dbgopt, name);
 }
+
+int
+nvkm_subdev_new_(const struct nvkm_subdev_func *func,
+                struct nvkm_device *device, int index,
+                struct nvkm_subdev **psubdev)
+{
+       if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_subdev_ctor(func, device, index, *psubdev);
+       return 0;
+}
index 8ebbe16560083dc573b41dc0336d217264d0909d..5b90c2a1bf3d314ea6b21b3a2da8d3ea8dbb33c1 100644 (file)
@@ -2924,6 +2924,20 @@ nvkm_device_del(struct nvkm_device **pdevice)
        }
 }
 
+static inline bool
+nvkm_device_endianness(struct nvkm_device *device)
+{
+       u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
+#ifdef __BIG_ENDIAN
+       if (!boot1)
+               return false;
+#else
+       if (boot1)
+               return false;
+#endif
+       return true;
+}
+
 int
 nvkm_device_ctor(const struct nvkm_device_func *func,
                 const struct nvkm_device_quirk *quirk,
@@ -2934,8 +2948,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 {
        struct nvkm_subdev *subdev;
        u64 mmio_base, mmio_size;
-       u32 boot0, strap;
-       void __iomem *map;
+       u32 boot0, boot1, strap;
        int ret = -EEXIST, i;
        unsigned chipset;
 
@@ -2961,26 +2974,30 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
        mmio_base = device->func->resource_addr(device, 0);
        mmio_size = device->func->resource_size(device, 0);
 
-       /* identify the chipset, and determine classes of subdev/engines */
-       if (detect) {
-               map = ioremap(mmio_base, 0x102000);
-               if (ret = -ENOMEM, map == NULL)
+       if (detect || mmio) {
+               device->pri = ioremap(mmio_base, mmio_size);
+               if (device->pri == NULL) {
+                       nvdev_error(device, "unable to map PRI\n");
+                       ret = -ENOMEM;
                        goto done;
+               }
+       }
 
+       /* identify the chipset, and determine classes of subdev/engines */
+       if (detect) {
                /* switch mmio to cpu's native endianness */
-#ifndef __BIG_ENDIAN
-               if (ioread32_native(map + 0x000004) != 0x00000000) {
-#else
-               if (ioread32_native(map + 0x000004) == 0x00000000) {
-#endif
-                       iowrite32_native(0x01000001, map + 0x000004);
-                       ioread32_native(map);
+               if (!nvkm_device_endianness(device)) {
+                       nvkm_wr32(device, 0x000004, 0x01000001);
+                       nvkm_rd32(device, 0x000000);
+                       if (!nvkm_device_endianness(device)) {
+                               nvdev_error(device,
+                                           "GPU not supported on big-endian\n");
+                               ret = -ENOSYS;
+                               goto done;
+                       }
                }
 
-               /* read boot0 and strapping information */
-               boot0 = ioread32_native(map + 0x000000);
-               strap = ioread32_native(map + 0x101000);
-               iounmap(map);
+               boot0 = nvkm_rd32(device, 0x000000);
 
                /* chipset can be overridden for devel/testing purposes */
                chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
@@ -3138,6 +3155,17 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                nvdev_info(device, "NVIDIA %s (%08x)\n",
                           device->chip->name, boot0);
 
+               /* vGPU detection */
+               boot1 = nvkm_rd32(device, 0x0000004);
+               if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+                       nvdev_info(device, "vGPUs are not supported\n");
+                       ret = -ENODEV;
+                       goto done;
+               }
+
+               /* read strapping information */
+               strap = nvkm_rd32(device, 0x101000);
+
                /* determine frequency of timing crystal */
                if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
                    (device->chipset >= 0x20 && device->chipset < 0x25))
@@ -3158,15 +3186,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
        if (!device->name)
                device->name = device->chip->name;
 
-       if (mmio) {
-               device->pri = ioremap(mmio_base, mmio_size);
-               if (!device->pri) {
-                       nvdev_error(device, "unable to map PRI\n");
-                       ret = -ENOMEM;
-                       goto done;
-               }
-       }
-
        mutex_init(&device->mutex);
 
        for (i = 0; i < NVKM_SUBDEV_NR; i++) {
@@ -3254,6 +3273,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 
        ret = 0;
 done:
+       if (device->pri && (!mmio || ret)) {
+               iounmap(device->pri);
+               device->pri = NULL;
+       }
        mutex_unlock(&nv_devices_mutex);
        return ret;
 }
index 0d584d0da59cf04eeb8b0ac398e2e39d51b6dfa5..571687ba85b8f177b289b96bce8c91dfbac000c0 100644 (file)
@@ -47,6 +47,7 @@ nvkm-y += nvkm/engine/disp/dp.o
 
 nvkm-y += nvkm/engine/disp/hdagt215.o
 nvkm-y += nvkm/engine/disp/hdagf119.o
+nvkm-y += nvkm/engine/disp/hdagv100.o
 
 nvkm-y += nvkm/engine/disp/hdmi.o
 nvkm-y += nvkm/engine/disp/hdmig84.o
@@ -74,6 +75,8 @@ nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
 nvkm-y += nvkm/engine/disp/roottu102.o
 
+nvkm-y += nvkm/engine/disp/capsgv100.o
+
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
 nvkm-y += nvkm/engine/disp/changv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
new file mode 100644 (file)
index 0000000..5026e53
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
+#include "rootnv50.h"
+
+struct gv100_disp_caps {
+       struct nvkm_object object;
+       struct nv50_disp *disp;
+};
+
+static int
+gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
+                   enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+       struct gv100_disp_caps *caps = gv100_disp_caps(object);
+       struct nvkm_device *device = caps->disp->base.engine.subdev.device;
+       *type = NVKM_OBJECT_MAP_IO;
+       *addr = 0x640000 + device->func->resource_addr(device, 0);
+       *size = 0x1000;
+       return 0;
+}
+
+static const struct nvkm_object_func
+gv100_disp_caps = {
+       .map = gv100_disp_caps_map,
+};
+
+int
+gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+       struct gv100_disp_caps *caps;
+
+       if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
+               return -ENOMEM;
+       *pobject = &caps->object;
+
+       nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
+       caps->disp = disp;
+       return 0;
+}
index 0fa0ec0a1de001e304eb9d54aff2ca6aae5067cf..19d2d58344e4adf33d1d8db7e4ec8067b8ee83d5 100644 (file)
 #include "ior.h"
 
 void
-gf119_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gf119_hda_device_entry(struct nvkm_ior *ior, int head)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
-       const u32 soff = 0x030 * ior->id;
+       const u32 hoff = 0x800 * head;
+       nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
+}
+
+void
+gf119_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
+{
+       struct nvkm_device *device = ior->disp->engine.subdev.device;
+       const u32 soff = 0x030 * ior->id + (head * 0x04);
        int i;
 
        for (i = 0; i < size; i++)
@@ -41,14 +49,14 @@ void
 gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
-       const u32 hoff = 0x800 * head;
+       const u32 soff = 0x030 * ior->id + (head * 0x04);
        u32 data = 0x80000000;
        u32 mask = 0x80000001;
        if (present) {
-               nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+               ior->func->hda.device_entry(ior, head);
                data |= 0x00000001;
        } else {
                mask |= 0x00000002;
        }
-       nvkm_mask(device, 0x10ec10 + ior->id * 0x030, mask, data);
+       nvkm_mask(device, 0x10ec10 + soff, mask, data);
 }
index 4509d2ba880ea12f965b6c13ddfef80ec323a201..0d1b81fe109304de4bc53c496f3625654b6593e9 100644 (file)
@@ -24,7 +24,7 @@
 #include "ior.h"
 
 void
-gt215_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gt215_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
        const u32 soff = ior->id * 0x800;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
new file mode 100644 (file)
index 0000000..57d374e
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+void
+gv100_hda_device_entry(struct nvkm_ior *ior, int head)
+{
+       struct nvkm_device *device = ior->disp->engine.subdev.device;
+       const u32 hoff = 0x800 * head;
+       nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
+}
index 009d3a8b7a50a757e317369d5e9b3021d30eed03..c1d7a36e4d3c9c2dffb5e0042a53f5008b98fb3a 100644 (file)
@@ -87,7 +87,8 @@ struct nvkm_ior_func {
 
        struct {
                void (*hpd)(struct nvkm_ior *, int head, bool present);
-               void (*eld)(struct nvkm_ior *, u8 *data, u8 size);
+               void (*eld)(struct nvkm_ior *, int head, u8 *data, u8 size);
+               void (*device_entry)(struct nvkm_ior *, int head);
        } hda;
 };
 
@@ -158,10 +159,13 @@ void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
 
 void gt215_hda_hpd(struct nvkm_ior *, int, bool);
-void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
 
 void gf119_hda_hpd(struct nvkm_ior *, int, bool);
-void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gf119_hda_eld(struct nvkm_ior *, int, u8 *, u8);
+void gf119_hda_device_entry(struct nvkm_ior *, int);
+
+void gv100_hda_device_entry(struct nvkm_ior *, int);
 
 #define IOR_MSG(i,l,f,a...) do {                                               \
        struct nvkm_ior *_ior = (i);                                           \
index 9c658d632d372abd56089d6857f3cb317ecafe9f..47efb48d769a0e04415b67436b7799251a674e38 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_root_func
 gv100_disp_root = {
        .user = {
+               {{-1,-1,GV100_DISP_CAPS                }, gv100_disp_caps_new },
                {{0,0,GV100_DISP_CURSOR                }, gv100_disp_curs_new },
                {{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
                {{0,0,GV100_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
index 5f758948d6e1e8954ed762b012f25e70dca2b210..a7672ef17d3bc8474c22d2a5fa63e66b3d94b641 100644 (file)
@@ -155,7 +155,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
                        if (outp->info.type == DCB_OUTPUT_DP)
                                ior->func->dp.audio(ior, hidx, true);
                        ior->func->hda.hpd(ior, hidx, true);
-                       ior->func->hda.eld(ior, data, size);
+                       ior->func->hda.eld(ior, hidx, data, size);
                } else {
                        if (outp->info.type == DCB_OUTPUT_DP)
                                ior->func->dp.audio(ior, hidx, false);
index a1f942793f983a7cb28547db2330ce78d24ae07a..7070f5408d92bf617746b5e7baa76b45eff8f825 100644 (file)
@@ -24,6 +24,9 @@ int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
                         const struct nvkm_oclass *, void *data, u32 size,
                         struct nvkm_object **);
 
+int gv100_disp_caps_new(const struct nvkm_oclass *, void *, u32,
+                       struct nv50_disp *, struct nvkm_object **);
+
 extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
 extern const struct nvkm_disp_oclass g84_disp_root_oclass;
 extern const struct nvkm_disp_oclass g94_disp_root_oclass;
index 579a5d02308a0f5c04941c0eca3fa47f91b76b48..d8719d38b98ade83fc34b4e21ce4260f1dccf429 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_root_func
 tu102_disp_root = {
        .user = {
+               {{-1,-1,GV100_DISP_CAPS                }, gv100_disp_caps_new },
                {{0,0,TU102_DISP_CURSOR                }, gv100_disp_curs_new },
                {{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
                {{0,0,TU102_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
index 456a5a143522ae19cb2cd1011a08da4811429fe3..3b3643fb101990a25f2b6817be15c0509a4c477b 100644 (file)
@@ -177,6 +177,7 @@ gf119_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gf119_hda_device_entry,
        },
 };
 
index b94090edaebff2b7462499c18339790d01dbde91..0c0925680790350a303435b5c3b349640073eb46 100644 (file)
@@ -43,6 +43,7 @@ gk104_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gf119_hda_device_entry,
        },
 };
 
index e6965dec09c968f6257649990c0eb78abec02b02..38045c92197f8734f2e5ae54563440f580a5113d 100644 (file)
@@ -57,6 +57,7 @@ gm107_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gf119_hda_device_entry,
        },
 };
 
index 384f82652bec1180de2d2c82a219263e44184b71..cf2075db742a21a60db77145ea27e451f34a8343 100644 (file)
@@ -115,6 +115,7 @@ gm200_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gf119_hda_device_entry,
        },
 };
 
index b0597ff9a7149fb256fa259caf50a78d2c5f89cd..d11a0dff10c66ae93d0fb457578e8cb6c37638da 100644 (file)
@@ -103,6 +103,7 @@ gv100_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gv100_hda_device_entry,
        },
 };
 
index 4d5f3791ea7b039c4bfe19f57efc853dac26ec73..fa6d742512376730e6f2e9f5774a23ab2d8d63aa 100644 (file)
@@ -88,6 +88,7 @@ tu102_sor = {
        .hda = {
                .hpd = gf119_hda_hpd,
                .eld = gf119_hda_eld,
+               .device_entry = gv100_hda_device_entry,
        },
 };
 
index 4209b24a46d703ad4c0c24821a768121edb81e4e..ec330d791d15e8ed577d3ea7185f3a23e5b62377 100644 (file)
@@ -319,6 +319,17 @@ gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
+MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
+#endif
+
 static int
 gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
 {
index 8eb2a930a9b5e33b857cb4af43f399ff7eaf747f..e4866a02e457eec8a9357a7b1c1b7da705b03b94 100644 (file)
@@ -250,6 +250,11 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
                list_add_tail(&lsf->head, &acr->lsf);
        }
 
+       /* Ensure the falcon that'll provide ACR functions is booted first. */
+       lsf = nvkm_acr_falcon(device);
+       if (lsf)
+               list_move(&lsf->head, &acr->lsf);
+
        if (!acr->wpr_fw || acr->wpr_comp)
                wpr_size = acr->func->wpr_layout(acr);
 
index aecce2dac5586725e327137ba491c9f058d9af11..667fa016496eeb11e61b774f8db98bb9565829b3 100644 (file)
@@ -100,25 +100,21 @@ nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
        hsfw->data_size = lhdr->data_size;
 
        hsfw->sig.prod.size = fwhdr->sig_prod_size;
-       hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+       hsfw->sig.prod.data = kmemdup(fw->data + fwhdr->sig_prod_offset + sig,
+                                     hsfw->sig.prod.size, GFP_KERNEL);
        if (!hsfw->sig.prod.data) {
                ret = -ENOMEM;
                goto done;
        }
 
-       memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
-              hsfw->sig.prod.size);
-
        hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
-       hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+       hsfw->sig.dbg.data = kmemdup(fw->data + fwhdr->sig_dbg_offset + sig,
+                                    hsfw->sig.dbg.size, GFP_KERNEL);
        if (!hsfw->sig.dbg.data) {
                ret = -ENOMEM;
                goto done;
        }
 
-       memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
-              hsfw->sig.dbg.size);
-
        hsfw->sig.patch_loc = loc;
 done:
        nvkm_firmware_put(fw);
index 06572f8ce9148805ca37470c978af17f42e72827..f9c427559538fda562ad5a907d951e96fefc5b71 100644 (file)
  */
 #include "priv.h"
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct device *dev)
+static int
+acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
 {
-       return false;
-}
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+       acpi_status status;
+       union acpi_object rom_arg_elements[2], *obj;
+       struct acpi_object_list rom_arg;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
+       rom_arg.count = 2;
+       rom_arg.pointer = &rom_arg_elements[0];
+
+       rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+       rom_arg_elements[0].integer.value = offset;
+
+       rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+       rom_arg_elements[1].integer.value = length;
+
+       status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+       if (ACPI_FAILURE(status)) {
+               pr_info("failed to evaluate ROM got %s\n",
+                       acpi_format_exception(status));
+               return -ENODEV;
+       }
+       obj = (union acpi_object *)buffer.pointer;
+       length = min(length, obj->buffer.length);
+       memcpy(bios+offset, obj->buffer.pointer, length);
+       kfree(buffer.pointer);
+       return length;
+#else
        return -EINVAL;
-}
 #endif
+}
 
 /* This version of the shadow function disobeys the ACPI spec and tries
  * to fetch in units of more than 4KiB at a time.  This is a LOT faster
@@ -51,7 +68,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
        u32 fetch = limit - start;
 
        if (nvbios_extend(bios, limit) >= 0) {
-               int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+               int ret = acpi_read_bios(data, bios->data, start, fetch);
                if (ret == fetch)
                        return fetch;
        }
@@ -73,9 +90,8 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 
        if (nvbios_extend(bios, limit) >= 0) {
                while (start + fetch < limit) {
-                       int ret = nouveau_acpi_get_bios_chunk(bios->data,
-                                                             start + fetch,
-                                                             0x1000);
+                       int ret = acpi_read_bios(data, bios->data,
+                                                start + fetch, 0x1000);
                        if (ret != 0x1000)
                                break;
                        fetch += 0x1000;
@@ -88,9 +104,22 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 acpi_init(struct nvkm_bios *bios, const char *name)
 {
-       if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+       acpi_status status;
+       acpi_handle dhandle, rom_handle;
+
+       dhandle = ACPI_HANDLE(bios->subdev.device->dev);
+       if (!dhandle)
                return ERR_PTR(-ENODEV);
-       return NULL;
+
+       status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+       if (ACPI_FAILURE(status))
+               return ERR_PTR(-ENODEV);
+
+       return rom_handle;
+#else
+       return ERR_PTR(-ENODEV);
+#endif
 }
 
 const struct nvbios_source
index d80dbc8f09b2077c790f6ceb186faf504d0f07e6..2340040942c937c20dddd4bfd26a686df360596a 100644 (file)
@@ -114,9 +114,5 @@ int
 gf100_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
 }
index 3905a80da811997566728bafd39a57050c2881e9..1124dadac145b7cb957542aa7da31724213b75f0 100644 (file)
@@ -43,9 +43,5 @@ int
 gf117_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
 }
index 9025ed1bd2a990e872a14c3a3a653721b219840f..f3915f85838ed400e4dc3d4a5bc5b36f12b5591f 100644 (file)
@@ -117,9 +117,5 @@ int
 gk104_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
 }
index 1a4ab825852ce2bf6649c28b8deacd9ace6f91ef..187d544378b04fa603ceba76360a2c0ffcb3b5f1 100644 (file)
@@ -81,9 +81,5 @@ int
 gk20a_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
 }
index c63328152bfa669d17d642ab1f7a55045198326b..0f1f0ad6377e2fb875e5de6aa5e518f956376e34 100644 (file)
@@ -32,9 +32,5 @@ int
 gm200_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
 }
index 39db90aa2c8066ee7ce728ac1d9705a01061444d..0347b367cefe47aaec103dc426a81db7866327ed 100644 (file)
@@ -51,9 +51,5 @@ int
 gp10b_ibus_new(struct nvkm_device *device, int index,
               struct nvkm_subdev **pibus)
 {
-       struct nvkm_subdev *ibus;
-       if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-               return -ENOMEM;
-       nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
-       return 0;
+       return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
 }
index 41640e0584ac0fb02ede12b1c650267f8209d325..199f94e15c5f25c7fc48984d7e09b34c7570bc13 100644 (file)
@@ -580,7 +580,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                                it.pte[it.lvl]++;
                        }
                }
-       };
+       }
 
        nvkm_vmm_flush(&it);
        return ~0ULL;
index 5e55ecbd8005558d0319d3b84457e55be78d0bc4..d3f8f916d0dbcc815db973dc0785fc8e62b17429 100644 (file)
@@ -304,7 +304,7 @@ int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
                FILL(VMM, PT, PTEI, _ptes, MAP, _addr);                        \
                PTEI += _ptes;                                                 \
                PTEN -= _ptes;                                                 \
-       };                                                                     \
+       }                                                                      \
        nvkm_done((PT)->memory);                                               \
 } while(0)
 
index 03b355dabab3dc64507e3102259b1fd6451972ba..abf3eda683f0c6a274751a1783cc2d9a9758deaf 100644 (file)
@@ -36,8 +36,8 @@ probe_monitoring_device(struct nvkm_i2c_bus *bus,
 
        request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-       client = i2c_new_device(&bus->i2c, info);
-       if (!client)
+       client = i2c_new_client_device(&bus->i2c, info);
+       if (IS_ERR(client))
                return false;
 
        if (!client->dev.driver ||
index dbb90f2d2ccde5973db67530924c358059cec747..6639ee9b05d3d611b8d055af724f057237680e52 100644 (file)
@@ -3137,33 +3137,12 @@ static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
        dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
        dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
 
-       if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
-               vs = false;
-       else
-               vs = true;
-
-       if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
-               hs = false;
-       else
-               hs = true;
-
-       if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
-               de = false;
-       else
-               de = true;
-
-       if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
-               ipc = false;
-       else
-               ipc = true;
-
-       /* always use the 'rf' setting */
-       onoff = true;
-
-       if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
-               rf = true;
-       else
-               rf = false;
+       vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+       hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+       de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
+       ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
+       onoff = true; /* always use the 'rf' setting */
+       rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
 
        l = FLD_VAL(onoff, 17, 17) |
                FLD_VAL(rf, 16, 16) |
index 766553bb2f87b7b7e36cc107396980d10e67be92..9701843ccf09d946c091156f0611811410c465d2 100644 (file)
@@ -208,49 +208,6 @@ static const struct venc_config venc_config_ntsc_trm = {
        .gen_ctrl                               = 0x00F90000,
 };
 
-static const struct venc_config venc_config_pal_bdghi = {
-       .f_control                              = 0,
-       .vidout_ctrl                            = 0,
-       .sync_ctrl                              = 0,
-       .hfltr_ctrl                             = 0,
-       .x_color                                = 0,
-       .line21                                 = 0,
-       .ln_sel                                 = 21,
-       .htrigger_vtrigger                      = 0,
-       .tvdetgp_int_start_stop_x               = 0x00140001,
-       .tvdetgp_int_start_stop_y               = 0x00010001,
-       .gen_ctrl                               = 0x00FB0000,
-
-       .llen                                   = 864-1,
-       .flens                                  = 625-1,
-       .cc_carr_wss_carr                       = 0x2F7625ED,
-       .c_phase                                = 0xDF,
-       .gain_u                                 = 0x111,
-       .gain_v                                 = 0x181,
-       .gain_y                                 = 0x140,
-       .black_level                            = 0x3e,
-       .blank_level                            = 0x3e,
-       .m_control                              = 0<<2 | 1<<1,
-       .bstamp_wss_data                        = 0x42,
-       .s_carr                                 = 0x2a098acb,
-       .l21__wc_ctl                            = 0<<13 | 0x16<<8 | 0<<0,
-       .savid__eavid                           = 0x06A70108,
-       .flen__fal                              = 23<<16 | 624<<0,
-       .lal__phase_reset                       = 2<<17 | 310<<0,
-       .hs_int_start_stop_x                    = 0x00920358,
-       .hs_ext_start_stop_x                    = 0x000F035F,
-       .vs_int_start_x                         = 0x1a7<<16,
-       .vs_int_stop_x__vs_int_start_y          = 0x000601A7,
-       .vs_int_stop_y__vs_ext_start_x          = 0x01AF0036,
-       .vs_ext_stop_x__vs_ext_start_y          = 0x27101af,
-       .vs_ext_stop_y                          = 0x05,
-       .avid_start_stop_x                      = 0x03530082,
-       .avid_start_stop_y                      = 0x0270002E,
-       .fid_int_start_x__fid_int_start_y       = 0x0005008A,
-       .fid_int_offset_y__fid_ext_start_x      = 0x002E0138,
-       .fid_ext_start_y__fid_ext_offset_y      = 0x01380005,
-};
-
 enum venc_videomode {
        VENC_MODE_UNKNOWN,
        VENC_MODE_PAL,
index 34dfb33145b49f047a38adf9e9626d2ad6c52be4..b57fbe8a0ac22e603849e156021f466be935172f 100644 (file)
@@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = {
        {"tiler_map", tiler_map_show, 0},
 };
 
-int omap_debugfs_init(struct drm_minor *minor)
+void omap_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       ret = drm_debugfs_create_files(omap_debugfs_list,
-                       ARRAY_SIZE(omap_debugfs_list),
-                       minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install omap_debugfs_list\n");
-               return ret;
-       }
+       drm_debugfs_create_files(omap_debugfs_list,
+                                ARRAY_SIZE(omap_debugfs_list),
+                                minor->debugfs_root, minor);
 
        if (dmm_is_available())
-               ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
-                               ARRAY_SIZE(omap_dmm_debugfs_list),
-                               minor->debugfs_root, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
+               drm_debugfs_create_files(omap_dmm_debugfs_list,
+                                        ARRAY_SIZE(omap_dmm_debugfs_list),
+                                        minor->debugfs_root, minor);
 }
 
 #endif
index 7c4b66efcaa707ab3b1d2ae5d8e3d026cb1db7ca..8a1fac680138ef4d736ff859480262e3869a864a 100644 (file)
@@ -82,6 +82,6 @@ struct omap_drm_private {
 };
 
 
-int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_init(struct drm_minor *minor);
 
 #endif /* __OMAPDRM_DRV_H__ */
index a1723c1b5fbf83054d477c6f2260159dd23b8b56..39055c1f0e2f34f2c9b559a0968bd5ea1da937a6 100644 (file)
@@ -18,6 +18,16 @@ config DRM_PANEL_ARM_VERSATILE
          reference designs. The panel is detected using special registers
          in the Versatile family syscon registers.
 
+config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
+       tristate "ASUS Z00T TM5P5 NT35596 panel"
+       depends on GPIOLIB && OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the ASUS TMP5P5
+         NT35596 1080x1920 video mode panel as found in some Asus
+         Zenfone 2 Laser Z00T devices.
+
 config DRM_PANEL_BOE_HIMAX8279D
        tristate "Boe Himax8279d panel"
        depends on OF
@@ -137,6 +147,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
          24 bit RGB per pixel. It provides a MIPI DSI interface to
          the host and has a built-in LED backlight.
 
+config DRM_PANEL_LEADTEK_LTK050H3146W
+       tristate "Leadtek LTK050H3146W panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for Leadtek LTK050H3146W
+         TFT-LCD modules. The panel has a 720x1280 resolution and uses
+         24 bit RGB per pixel. It provides a MIPI DSI interface to
+         the host and has a built-in LED backlight.
+
 config DRM_PANEL_LEADTEK_LTK500HD1829
        tristate "Leadtek LTK500HD1829 panel"
        depends on OF
@@ -433,6 +454,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
          Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
          Video Mode panel
 
+config DRM_PANEL_VISIONOX_RM69299
+       tristate "Visionox RM69299"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       help
+         Say Y here if you want to enable support for Visionox
+         RM69299  DSI Video Mode panel.
+
 config DRM_PANEL_XINPENG_XPP055C272
        tristate "Xinpeng XPP055C272 panel driver"
        depends on OF
index 96a883cd66305c3b1e459684130d25a68d8eed0a..de74f282c4330048b668e42af1915a32e2e66766 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
 obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
 obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
@@ -46,4 +48,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
 obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
new file mode 100644 (file)
index 0000000..39e0f03
--- /dev/null
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tm5p5_nt35596 {
+       struct drm_panel panel;
+       struct mipi_dsi_device *dsi;
+       struct regulator_bulk_data supplies[2];
+       struct gpio_desc *reset_gpio;
+       bool prepared;
+};
+
+static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
+{
+       return container_of(panel, struct tm5p5_nt35596, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do {                                \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d));    \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do {                            \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
+{
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(1000, 2000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       usleep_range(1000, 2000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(15000, 16000);
+}
+
+static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
+{
+       struct mipi_dsi_device *dsi = ctx->dsi;
+
+       dsi_generic_write_seq(dsi, 0xff, 0x05);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0xc5, 0x31);
+       dsi_generic_write_seq(dsi, 0xff, 0x04);
+       dsi_generic_write_seq(dsi, 0x01, 0x84);
+       dsi_generic_write_seq(dsi, 0x05, 0x25);
+       dsi_generic_write_seq(dsi, 0x06, 0x01);
+       dsi_generic_write_seq(dsi, 0x07, 0x20);
+       dsi_generic_write_seq(dsi, 0x08, 0x06);
+       dsi_generic_write_seq(dsi, 0x09, 0x08);
+       dsi_generic_write_seq(dsi, 0x0a, 0x10);
+       dsi_generic_write_seq(dsi, 0x0b, 0x10);
+       dsi_generic_write_seq(dsi, 0x0c, 0x10);
+       dsi_generic_write_seq(dsi, 0x0d, 0x14);
+       dsi_generic_write_seq(dsi, 0x0e, 0x14);
+       dsi_generic_write_seq(dsi, 0x0f, 0x14);
+       dsi_generic_write_seq(dsi, 0x10, 0x14);
+       dsi_generic_write_seq(dsi, 0x11, 0x14);
+       dsi_generic_write_seq(dsi, 0x12, 0x14);
+       dsi_generic_write_seq(dsi, 0x17, 0xf3);
+       dsi_generic_write_seq(dsi, 0x18, 0xc0);
+       dsi_generic_write_seq(dsi, 0x19, 0xc0);
+       dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+       dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+       dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+       dsi_generic_write_seq(dsi, 0x20, 0xb3);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0xff, 0x00);
+       dsi_generic_write_seq(dsi, 0xfb, 0x01);
+       dsi_generic_write_seq(dsi, 0x35, 0x01);
+       dsi_generic_write_seq(dsi, 0xd3, 0x06);
+       dsi_generic_write_seq(dsi, 0xd4, 0x04);
+       dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+       dsi_generic_write_seq(dsi, 0x11, 0x00);
+       msleep(100);
+       dsi_generic_write_seq(dsi, 0x29, 0x00);
+       dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+       return 0;
+}
+
+static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
+{
+       struct mipi_dsi_device *dsi = ctx->dsi;
+       struct device *dev = &dsi->dev;
+       int ret;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to set display off: %d\n", ret);
+               return ret;
+       }
+       msleep(60);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+               return ret;
+       }
+
+       dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+
+       return 0;
+}
+
+static int tm5p5_nt35596_prepare(struct drm_panel *panel)
+{
+       struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+       struct device *dev = &ctx->dsi->dev;
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enable regulators: %d\n", ret);
+               return ret;
+       }
+
+       tm5p5_nt35596_reset(ctx);
+
+       ret = tm5p5_nt35596_on(ctx);
+       if (ret < 0) {
+               dev_err(dev, "Failed to initialize panel: %d\n", ret);
+               gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+               regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+                                      ctx->supplies);
+               return ret;
+       }
+
+       ctx->prepared = true;
+       return 0;
+}
+
+static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
+{
+       struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+       struct device *dev = &ctx->dsi->dev;
+       int ret;
+
+       if (!ctx->prepared)
+               return 0;
+
+       ret = tm5p5_nt35596_off(ctx);
+       if (ret < 0)
+               dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+                              ctx->supplies);
+
+       ctx->prepared = false;
+       return 0;
+}
+
+static const struct drm_display_mode tm5p5_nt35596_mode = {
+       .clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
+       .hdisplay = 1080,
+       .hsync_start = 1080 + 100,
+       .hsync_end = 1080 + 100 + 8,
+       .htotal = 1080 + 100 + 8 + 16,
+       .vdisplay = 1920,
+       .vsync_start = 1920 + 4,
+       .vsync_end = 1920 + 4 + 2,
+       .vtotal = 1920 + 4 + 2 + 4,
+       .vrefresh = 60,
+       .width_mm = 68,
+       .height_mm = 121,
+};
+
+static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
+                                  struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
+       if (!mode)
+               return -ENOMEM;
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       connector->display_info.width_mm = mode->width_mm;
+       connector->display_info.height_mm = mode->height_mm;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
+       .prepare = tm5p5_nt35596_prepare,
+       .unprepare = tm5p5_nt35596_unprepare,
+       .get_modes = tm5p5_nt35596_get_modes,
+};
+
+static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       u16 brightness = bl->props.brightness;
+       int ret;
+
+       if (bl->props.power != FB_BLANK_UNBLANK ||
+           bl->props.fb_blank != FB_BLANK_UNBLANK ||
+           bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+               brightness = 0;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return 0;
+}
+
+static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       u16 brightness = bl->props.brightness;
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       return brightness & 0xff;
+}
+
+static const struct backlight_ops tm5p5_nt35596_bl_ops = {
+       .update_status = tm5p5_nt35596_bl_update_status,
+       .get_brightness = tm5p5_nt35596_bl_get_brightness,
+};
+
+static struct backlight_device *
+tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       const struct backlight_properties props = {
+               .type = BACKLIGHT_RAW,
+               .brightness = 255,
+               .max_brightness = 255,
+       };
+
+       return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+                                             &tm5p5_nt35596_bl_ops, &props);
+}
+
+static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct tm5p5_nt35596 *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->supplies[0].supply = "vdd";
+       ctx->supplies[1].supply = "vddio";
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+                                     ctx->supplies);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get regulators: %d\n", ret);
+               return ret;
+       }
+
+       ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               ret = PTR_ERR(ctx->reset_gpio);
+               dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+               return ret;
+       }
+
+       ctx->dsi = dsi;
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+                         MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET |
+                         MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+       drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+
+       ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
+       if (IS_ERR(ctx->panel.backlight)) {
+               ret = PTR_ERR(ctx->panel.backlight);
+               dev_err(dev, "Failed to create backlight: %d\n", ret);
+               return ret;
+       }
+
+       ret = drm_panel_add(&ctx->panel);
+       if (ret < 0) {
+               dev_err(dev, "Failed to add panel: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+{
+       struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               dev_err(&dsi->dev,
+                       "Failed to detach from DSI host: %d\n", ret);
+
+       drm_panel_remove(&ctx->panel);
+
+       return 0;
+}
+
+static const struct of_device_id tm5p5_nt35596_of_match[] = {
+       { .compatible = "asus,z00t-tm5p5-n35596" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);
+
+static struct mipi_dsi_driver tm5p5_nt35596_driver = {
+       .probe = tm5p5_nt35596_probe,
+       .remove = tm5p5_nt35596_remove,
+       .driver = {
+               .name = "panel-tm5p5-nt35596",
+               .of_match_table = tm5p5_nt35596_of_match,
+       },
+};
+module_mipi_dsi_driver(tm5p5_nt35596_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konradybcio@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
index 48a164257d18c3571ceec393682e3c1bf77426cc..46fe1805c588080130baf4ac187342ce0e7351fc 100644 (file)
@@ -696,6 +696,34 @@ static const struct panel_desc auo_b101uan08_3_desc = {
        .init_cmds = auo_b101uan08_3_init_cmd,
 };
 
+static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+       .clock = 159916,
+       .hdisplay = 1200,
+       .hsync_start = 1200 + 80,
+       .hsync_end = 1200 + 80 + 24,
+       .htotal = 1200 + 80 + 24 + 60,
+       .vdisplay = 1920,
+       .vsync_start = 1920 + 20,
+       .vsync_end = 1920 + 20 + 4,
+       .vtotal = 1920 + 20 + 4 + 10,
+       .vrefresh = 60,
+       .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv105wum_nw0_desc = {
+       .modes = &boe_tv105wum_nw0_default_mode,
+       .bpc = 8,
+       .size = {
+               .width_mm = 141,
+               .height_mm = 226,
+       },
+       .lanes = 4,
+       .format = MIPI_DSI_FMT_RGB888,
+       .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                     MIPI_DSI_MODE_LPM,
+       .init_cmds = boe_init_cmd,
+};
+
 static int boe_panel_get_modes(struct drm_panel *panel,
                               struct drm_connector *connector)
 {
@@ -834,6 +862,9 @@ static const struct of_device_id boe_of_match[] = {
        { .compatible = "auo,b101uan08.3",
          .data = &auo_b101uan08_3_desc
        },
+       { .compatible = "boe,tv105wum-nw0",
+         .data = &boe_tv105wum_nw0_desc
+       },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, boe_of_match);
index 09935520e606359a7ad0fe28e19dc065698e29a7..873b1c7059bd8a1ffd4b474a0c0bf7174fa080fb 100644 (file)
@@ -379,7 +379,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
                                "can't set up VCOM amplitude (%d)\n", ret);
                        return ret;
                }
-       };
+       }
 
        if (ili->vcom_high != U8_MAX) {
                ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
@@ -388,7 +388,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
                        dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
                        return ret;
                }
-       };
+       }
 
        /* Set up gamma correction */
        for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
new file mode 100644 (file)
index 0000000..5a7a31c
--- /dev/null
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk050h3146w_cmd {
+       char cmd;
+       char data;
+};
+
+struct ltk050h3146w;
+struct ltk050h3146w_desc {
+       const struct drm_display_mode *mode;
+       int (*init)(struct ltk050h3146w *ctx);
+};
+
+struct ltk050h3146w {
+       struct device *dev;
+       struct drm_panel panel;
+       struct gpio_desc *reset_gpio;
+       struct regulator *vci;
+       struct regulator *iovcc;
+       const struct ltk050h3146w_desc *panel_desc;
+       bool prepared;
+};
+
+static const struct ltk050h3146w_cmd page1_cmds[] = {
+       { 0x22, 0x0A }, /* BGR SS GS */
+       { 0x31, 0x00 }, /* column inversion */
+       { 0x53, 0xA2 }, /* VCOM1 */
+       { 0x55, 0xA2 }, /* VCOM2 */
+       { 0x50, 0x81 }, /* VREG1OUT=5V */
+       { 0x51, 0x85 }, /* VREG2OUT=-5V */
+       { 0x62, 0x0D }, /* EQT Time setting */
+/*
+ * The vendor init selected page 1 here _again_
+ * Is this supposed to be page 2?
+ */
+       { 0xA0, 0x00 },
+       { 0xA1, 0x1A },
+       { 0xA2, 0x28 },
+       { 0xA3, 0x13 },
+       { 0xA4, 0x16 },
+       { 0xA5, 0x29 },
+       { 0xA6, 0x1D },
+       { 0xA7, 0x1E },
+       { 0xA8, 0x84 },
+       { 0xA9, 0x1C },
+       { 0xAA, 0x28 },
+       { 0xAB, 0x75 },
+       { 0xAC, 0x1A },
+       { 0xAD, 0x19 },
+       { 0xAE, 0x4D },
+       { 0xAF, 0x22 },
+       { 0xB0, 0x28 },
+       { 0xB1, 0x54 },
+       { 0xB2, 0x66 },
+       { 0xB3, 0x39 },
+       { 0xC0, 0x00 },
+       { 0xC1, 0x1A },
+       { 0xC2, 0x28 },
+       { 0xC3, 0x13 },
+       { 0xC4, 0x16 },
+       { 0xC5, 0x29 },
+       { 0xC6, 0x1D },
+       { 0xC7, 0x1E },
+       { 0xC8, 0x84 },
+       { 0xC9, 0x1C },
+       { 0xCA, 0x28 },
+       { 0xCB, 0x75 },
+       { 0xCC, 0x1A },
+       { 0xCD, 0x19 },
+       { 0xCE, 0x4D },
+       { 0xCF, 0x22 },
+       { 0xD0, 0x28 },
+       { 0xD1, 0x54 },
+       { 0xD2, 0x66 },
+       { 0xD3, 0x39 },
+};
+
+static const struct ltk050h3146w_cmd page3_cmds[] = {
+       { 0x01, 0x00 },
+       { 0x02, 0x00 },
+       { 0x03, 0x73 },
+       { 0x04, 0x00 },
+       { 0x05, 0x00 },
+       { 0x06, 0x0a },
+       { 0x07, 0x00 },
+       { 0x08, 0x00 },
+       { 0x09, 0x01 },
+       { 0x0a, 0x00 },
+       { 0x0b, 0x00 },
+       { 0x0c, 0x01 },
+       { 0x0d, 0x00 },
+       { 0x0e, 0x00 },
+       { 0x0f, 0x1d },
+       { 0x10, 0x1d },
+       { 0x11, 0x00 },
+       { 0x12, 0x00 },
+       { 0x13, 0x00 },
+       { 0x14, 0x00 },
+       { 0x15, 0x00 },
+       { 0x16, 0x00 },
+       { 0x17, 0x00 },
+       { 0x18, 0x00 },
+       { 0x19, 0x00 },
+       { 0x1a, 0x00 },
+       { 0x1b, 0x00 },
+       { 0x1c, 0x00 },
+       { 0x1d, 0x00 },
+       { 0x1e, 0x40 },
+       { 0x1f, 0x80 },
+       { 0x20, 0x06 },
+       { 0x21, 0x02 },
+       { 0x22, 0x00 },
+       { 0x23, 0x00 },
+       { 0x24, 0x00 },
+       { 0x25, 0x00 },
+       { 0x26, 0x00 },
+       { 0x27, 0x00 },
+       { 0x28, 0x33 },
+       { 0x29, 0x03 },
+       { 0x2a, 0x00 },
+       { 0x2b, 0x00 },
+       { 0x2c, 0x00 },
+       { 0x2d, 0x00 },
+       { 0x2e, 0x00 },
+       { 0x2f, 0x00 },
+       { 0x30, 0x00 },
+       { 0x31, 0x00 },
+       { 0x32, 0x00 },
+       { 0x33, 0x00 },
+       { 0x34, 0x04 },
+       { 0x35, 0x00 },
+       { 0x36, 0x00 },
+       { 0x37, 0x00 },
+       { 0x38, 0x3C },
+       { 0x39, 0x35 },
+       { 0x3A, 0x01 },
+       { 0x3B, 0x40 },
+       { 0x3C, 0x00 },
+       { 0x3D, 0x01 },
+       { 0x3E, 0x00 },
+       { 0x3F, 0x00 },
+       { 0x40, 0x00 },
+       { 0x41, 0x88 },
+       { 0x42, 0x00 },
+       { 0x43, 0x00 },
+       { 0x44, 0x1F },
+       { 0x50, 0x01 },
+       { 0x51, 0x23 },
+       { 0x52, 0x45 },
+       { 0x53, 0x67 },
+       { 0x54, 0x89 },
+       { 0x55, 0xab },
+       { 0x56, 0x01 },
+       { 0x57, 0x23 },
+       { 0x58, 0x45 },
+       { 0x59, 0x67 },
+       { 0x5a, 0x89 },
+       { 0x5b, 0xab },
+       { 0x5c, 0xcd },
+       { 0x5d, 0xef },
+       { 0x5e, 0x11 },
+       { 0x5f, 0x01 },
+       { 0x60, 0x00 },
+       { 0x61, 0x15 },
+       { 0x62, 0x14 },
+       { 0x63, 0x0E },
+       { 0x64, 0x0F },
+       { 0x65, 0x0C },
+       { 0x66, 0x0D },
+       { 0x67, 0x06 },
+       { 0x68, 0x02 },
+       { 0x69, 0x07 },
+       { 0x6a, 0x02 },
+       { 0x6b, 0x02 },
+       { 0x6c, 0x02 },
+       { 0x6d, 0x02 },
+       { 0x6e, 0x02 },
+       { 0x6f, 0x02 },
+       { 0x70, 0x02 },
+       { 0x71, 0x02 },
+       { 0x72, 0x02 },
+       { 0x73, 0x02 },
+       { 0x74, 0x02 },
+       { 0x75, 0x01 },
+       { 0x76, 0x00 },
+       { 0x77, 0x14 },
+       { 0x78, 0x15 },
+       { 0x79, 0x0E },
+       { 0x7a, 0x0F },
+       { 0x7b, 0x0C },
+       { 0x7c, 0x0D },
+       { 0x7d, 0x06 },
+       { 0x7e, 0x02 },
+       { 0x7f, 0x07 },
+       { 0x80, 0x02 },
+       { 0x81, 0x02 },
+       { 0x82, 0x02 },
+       { 0x83, 0x02 },
+       { 0x84, 0x02 },
+       { 0x85, 0x02 },
+       { 0x86, 0x02 },
+       { 0x87, 0x02 },
+       { 0x88, 0x02 },
+       { 0x89, 0x02 },
+       { 0x8A, 0x02 },
+};
+
+static const struct ltk050h3146w_cmd page4_cmds[] = {
+       { 0x70, 0x00 },
+       { 0x71, 0x00 },
+       { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */
+       { 0x84, 0x0F }, /* VGH clamp level 15V */
+       { 0x85, 0x0D }, /* VGL clamp level (-10V) */
+       { 0x32, 0xAC },
+       { 0x8C, 0x80 },
+       { 0x3C, 0xF5 },
+       { 0xB5, 0x07 }, /* GAMMA OP */
+       { 0x31, 0x45 }, /* SOURCE OP */
+       { 0x3A, 0x24 }, /* PS_EN OFF */
+       { 0x88, 0x33 }, /* LVD */
+};
+
+static inline
+struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
+{
+       return container_of(panel, struct ltk050h3146w, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do {                       \
+               static const u8 d[] = { seq };                          \
+               int ret;                                                \
+               ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d));   \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+       } while (0)
+
+static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       /*
+        * Init sequence was supplied by the panel vendor without much
+        * documentation.
+        */
+       dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+       dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+                         0x01);
+       dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+       dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+       dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+       dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+       dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+                         0x28, 0x04, 0xcc, 0xcc, 0xcc);
+       dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+       dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+       dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+       dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+       dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+                         0x80);
+       dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+                         0x16, 0x00, 0x00);
+       dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+                         0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+                         0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+                         0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+                         0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+       dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+                         0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+                         0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+                         0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+                         0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+                         0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+       dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+                         0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+                         0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+       dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+                         0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+                         0x21, 0x00, 0x60);
+       dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+       dsi_dcs_write_seq(dsi, 0xde, 0x02);
+       dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+       dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+       dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+       dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+       dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+       dsi_dcs_write_seq(dsi, 0xde, 0x00);
+
+       ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+                             ret);
+               return ret;
+       }
+
+       msleep(60);
+
+       return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_mode = {
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 42,
+       .hsync_end      = 720 + 42 + 8,
+       .htotal         = 720 + 42 + 8 + 42,
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 12,
+       .vsync_end      = 1280 + 12 + 4,
+       .vtotal         = 1280 + 12 + 4 + 18,
+       .clock          = 64018,
+       .width_mm       = 62,
+       .height_mm      = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_data = {
+       .mode = &ltk050h3146w_mode,
+       .init = ltk050h3146w_init_sequence,
+};
+
+static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       u8 d[3] = { 0x98, 0x81, page };
+
+       return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+}
+
+static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+                                     const struct ltk050h3146w_cmd *cmds,
+                                     int num)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int i, ret;
+
+       ret = ltk050h3146w_a2_select_page(ctx, page);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
+                             page, ret);
+               return ret;
+       }
+
+       for (i = 0; i < num; i++) {
+               ret = mipi_dsi_generic_write(dsi, &cmds[i],
+                                            sizeof(struct ltk050h3146w_cmd));
+               if (ret < 0) {
+                       DRM_DEV_ERROR(ctx->dev,
+                                     "failed to write page %d init cmds: %d\n",
+                                      page, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       /*
+        * Init sequence was supplied by the panel vendor without much
+        * documentation.
+        */
+       ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+                                        ARRAY_SIZE(page3_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+                                        ARRAY_SIZE(page4_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+                                        ARRAY_SIZE(page1_cmds));
+       if (ret < 0)
+               return ret;
+
+       ret = ltk050h3146w_a2_select_page(ctx, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+               return ret;
+       }
+
+       /* vendor code called this without param, where there should be one */
+       ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+                             ret);
+               return ret;
+       }
+
+       msleep(60);
+
+       return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_a2_mode = {
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 42,
+       .hsync_end      = 720 + 42 + 10,
+       .htotal         = 720 + 42 + 10 + 60,
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 18,
+       .vsync_end      = 1280 + 18 + 4,
+       .vtotal         = 1280 + 18 + 4 + 12,
+       .clock          = 65595,
+       .width_mm       = 62,
+       .height_mm      = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_a2_data = {
+       .mode = &ltk050h3146w_a2_mode,
+       .init = ltk050h3146w_a2_init_sequence,
+};
+
+static int ltk050h3146w_unprepare(struct drm_panel *panel)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       if (!ctx->prepared)
+               return 0;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+                             ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+                             ret);
+               return ret;
+       }
+
+       regulator_disable(ctx->iovcc);
+       regulator_disable(ctx->vci);
+
+       ctx->prepared = false;
+
+       return 0;
+}
+
+static int ltk050h3146w_prepare(struct drm_panel *panel)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+       ret = regulator_enable(ctx->vci);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev,
+                             "Failed to enable vci supply: %d\n", ret);
+               return ret;
+       }
+       ret = regulator_enable(ctx->iovcc);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev,
+                             "Failed to enable iovcc supply: %d\n", ret);
+               goto disable_vci;
+       }
+
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(5000, 6000);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       msleep(20);
+
+       ret = ctx->panel_desc->init(ctx);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+                             ret);
+               goto disable_iovcc;
+       }
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+               goto disable_iovcc;
+       }
+
+       /* T9: 120ms */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_set_display_on(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+               goto disable_iovcc;
+       }
+
+       msleep(50);
+
+       ctx->prepared = true;
+
+       return 0;
+
+disable_iovcc:
+       regulator_disable(ctx->iovcc);
+disable_vci:
+       regulator_disable(ctx->vci);
+       return ret;
+}
+
+static int ltk050h3146w_get_modes(struct drm_panel *panel,
+                                 struct drm_connector *connector)
+{
+       struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
+       if (!mode)
+               return -ENOMEM;
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       connector->display_info.width_mm = mode->width_mm;
+       connector->display_info.height_mm = mode->height_mm;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+static const struct drm_panel_funcs ltk050h3146w_funcs = {
+       .unprepare      = ltk050h3146w_unprepare,
+       .prepare        = ltk050h3146w_prepare,
+       .get_modes      = ltk050h3146w_get_modes,
+};
+
+static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct ltk050h3146w *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->panel_desc = of_device_get_match_data(dev);
+       if (!ctx->panel_desc)
+               return -EINVAL;
+
+       ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+               return PTR_ERR(ctx->reset_gpio);
+       }
+
+       ctx->vci = devm_regulator_get(dev, "vci");
+       if (IS_ERR(ctx->vci)) {
+               ret = PTR_ERR(ctx->vci);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dev,
+                                     "Failed to request vci regulator: %d\n",
+                                     ret);
+               return ret;
+       }
+
+       ctx->iovcc = devm_regulator_get(dev, "iovcc");
+       if (IS_ERR(ctx->iovcc)) {
+               ret = PTR_ERR(ctx->iovcc);
+               if (ret != -EPROBE_DEFER)
+                       DRM_DEV_ERROR(dev,
+                                     "Failed to request iovcc regulator: %d\n",
+                                     ret);
+               return ret;
+       }
+
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       ctx->dev = dev;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+                         MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+       drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+
+       ret = drm_panel_of_backlight(&ctx->panel);
+       if (ret)
+               return ret;
+
+       drm_panel_add(&ctx->panel);
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+               drm_panel_remove(&ctx->panel);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
+{
+       struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = drm_panel_disable(&ctx->panel);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+                             ret);
+
+       ret = drm_panel_unprepare(&ctx->panel);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+                             ret);
+}
+
+static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+{
+       struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ltk050h3146w_shutdown(dsi);
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+                             ret);
+
+       drm_panel_remove(&ctx->panel);
+
+       return 0;
+}
+
+static const struct of_device_id ltk050h3146w_of_match[] = {
+       {
+               .compatible = "leadtek,ltk050h3146w",
+               .data = &ltk050h3146w_data,
+       },
+       {
+               .compatible = "leadtek,ltk050h3146w-a2",
+               .data = &ltk050h3146w_a2_data,
+       },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match);
+
+static struct mipi_dsi_driver ltk050h3146w_driver = {
+       .driver = {
+               .name = "panel-leadtek-ltk050h3146w",
+               .of_match_table = ltk050h3146w_of_match,
+       },
+       .probe  = ltk050h3146w_probe,
+       .remove = ltk050h3146w_remove,
+       .shutdown = ltk050h3146w_shutdown,
+};
+module_mipi_dsi_driver(ltk050h3146w_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
index 76ecf2de9c4457eb500b22b06dc807b00ed0b3ca..113ab9c0396b2099d2af04716066dc8a02bed889 100644 (file)
@@ -377,7 +377,7 @@ static const struct drm_display_mode default_mode = {
        .vsync_end      = 1280 + 30 + 4,
        .vtotal         = 1280 + 30 + 4 + 12,
        .vrefresh       = 60,
-       .clock          = 41600,
+       .clock          = 69217,
        .width_mm       = 62,
        .height_mm      = 110,
 };
index a470810f7dbef8ecd54595227e4c821a0397b758..05cae8d62d56329e65259929226fa0498683a067 100644 (file)
@@ -49,7 +49,8 @@ enum nt39016_regs {
 #define NT39016_SYSTEM_STANDBY BIT(1)
 
 struct nt39016_panel_info {
-       struct drm_display_mode display_mode;
+       const struct drm_display_mode *display_modes;
+       unsigned int num_modes;
        u16 width_mm, height_mm;
        u32 bus_format, bus_flags;
 };
@@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
        struct nt39016 *panel = to_nt39016(drm_panel);
        const struct nt39016_panel_info *panel_info = panel->panel_info;
        struct drm_display_mode *mode;
+       unsigned int i;
 
-       mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
-       if (!mode)
-               return -ENOMEM;
+       for (i = 0; i < panel_info->num_modes; i++) {
+               mode = drm_mode_duplicate(connector->dev,
+                                         &panel_info->display_modes[i]);
+               if (!mode)
+                       return -ENOMEM;
+
+               drm_mode_set_name(mode);
 
-       drm_mode_set_name(mode);
+               mode->type = DRM_MODE_TYPE_DRIVER;
+               if (panel_info->num_modes == 1)
+                       mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-       drm_mode_probed_add(connector, mode);
+               drm_mode_probed_add(connector, mode);
+       }
 
        connector->display_info.bpc = 8;
        connector->display_info.width_mm = panel_info->width_mm;
@@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
                                         &panel_info->bus_format, 1);
        connector->display_info.bus_flags = panel_info->bus_flags;
 
-       return 1;
+       return panel_info->num_modes;
 }
 
 static const struct drm_panel_funcs nt39016_funcs = {
@@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi)
        return 0;
 }
 
-static const struct nt39016_panel_info kd035g6_info = {
-       .display_mode = {
+static const struct drm_display_mode kd035g6_display_modes[] = {
+       {       /* 60 Hz */
                .clock = 6000,
                .hdisplay = 320,
                .hsync_start = 320 + 10,
@@ -330,6 +338,24 @@ static const struct nt39016_panel_info kd035g6_info = {
                .vrefresh = 60,
                .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
        },
+       {       /* 50 Hz */
+               .clock = 5400,
+               .hdisplay = 320,
+               .hsync_start = 320 + 42,
+               .hsync_end = 320 + 42 + 50,
+               .htotal = 320 + 42 + 50 + 20,
+               .vdisplay = 240,
+               .vsync_start = 240 + 5,
+               .vsync_end = 240 + 5 + 1,
+               .vtotal = 240 + 5 + 1 + 4,
+               .vrefresh = 50,
+               .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+       },
+};
+
+static const struct nt39016_panel_info kd035g6_info = {
+       .display_modes = kd035g6_display_modes,
+       .num_modes = ARRAY_SIZE(kd035g6_display_modes),
        .width_mm = 71,
        .height_mm = 53,
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
index 3ad828eaefe1ca1c5755b5f0835392aca00cafd8..b6ecd1552132ed308157faf16a77f4e65a7feee5 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
@@ -108,6 +109,7 @@ struct panel_simple {
        struct i2c_adapter *ddc;
 
        struct gpio_desc *enable_gpio;
+       struct gpio_desc *hpd_gpio;
 
        struct drm_display_mode override_mode;
 };
@@ -259,11 +261,37 @@ static int panel_simple_unprepare(struct drm_panel *panel)
        return 0;
 }
 
+static int panel_simple_get_hpd_gpio(struct device *dev,
+                                    struct panel_simple *p, bool from_probe)
+{
+       int err;
+
+       p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+       if (IS_ERR(p->hpd_gpio)) {
+               err = PTR_ERR(p->hpd_gpio);
+
+               /*
+                * If we're called from probe we won't consider '-EPROBE_DEFER'
+                * to be an error--we'll leave the error code in "hpd_gpio".
+                * When we try to use it we'll try again.  This allows for
+                * circular dependencies where the component providing the
+                * hpd gpio needs the panel to init before probing.
+                */
+               if (err != -EPROBE_DEFER || !from_probe) {
+                       dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
 static int panel_simple_prepare(struct drm_panel *panel)
 {
        struct panel_simple *p = to_panel_simple(panel);
        unsigned int delay;
        int err;
+       int hpd_asserted;
 
        if (p->prepared)
                return 0;
@@ -282,6 +310,26 @@ static int panel_simple_prepare(struct drm_panel *panel)
        if (delay)
                msleep(delay);
 
+       if (p->hpd_gpio) {
+               if (IS_ERR(p->hpd_gpio)) {
+                       err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+                       if (err)
+                               return err;
+               }
+
+               err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+                                        hpd_asserted, hpd_asserted,
+                                        1000, 2000000);
+               if (hpd_asserted < 0)
+                       err = hpd_asserted;
+
+               if (err) {
+                       dev_err(panel->dev,
+                               "error waiting for hpd GPIO: %d\n", err);
+                       return err;
+               }
+       }
+
        p->prepared = true;
 
        return 0;
@@ -462,6 +510,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
        panel->desc = desc;
 
        panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+       if (!panel->no_hpd) {
+               err = panel_simple_get_hpd_gpio(dev, panel, true);
+               if (err)
+                       return err;
+       }
 
        panel->supply = devm_regulator_get(dev, "power");
        if (IS_ERR(panel->supply))
@@ -836,7 +889,8 @@ static const struct panel_desc auo_g101evn010 = {
                .width = 216,
                .height = 135,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
 static const struct drm_display_mode auo_g104sn02_mode = {
@@ -862,6 +916,31 @@ static const struct panel_desc auo_g104sn02 = {
        },
 };
 
+static const struct drm_display_mode auo_g121ean01_mode = {
+       .clock = 66700,
+       .hdisplay = 1280,
+       .hsync_start = 1280 + 58,
+       .hsync_end = 1280 + 58 + 8,
+       .htotal = 1280 + 58 + 8 + 70,
+       .vdisplay = 800,
+       .vsync_start = 800 + 6,
+       .vsync_end = 800 + 6 + 4,
+       .vtotal = 800 + 6 + 4 + 10,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g121ean01 = {
+       .modes = &auo_g121ean01_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 261,
+               .height = 163,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g133han01_timings = {
        .pixelclock = { 134000000, 141200000, 149000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -892,6 +971,31 @@ static const struct panel_desc auo_g133han01 = {
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
+static const struct drm_display_mode auo_g156xtn01_mode = {
+       .clock = 76000,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 33,
+       .hsync_end = 1366 + 33 + 67,
+       .htotal = 1560,
+       .vdisplay = 768,
+       .vsync_start = 768 + 4,
+       .vsync_end = 768 + 4 + 4,
+       .vtotal = 806,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g156xtn01 = {
+       .modes = &auo_g156xtn01_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 344,
+               .height = 194,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g185han01_timings = {
        .pixelclock = { 120000000, 144000000, 175000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -922,6 +1026,36 @@ static const struct panel_desc auo_g185han01 = {
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
+static const struct display_timing auo_g190ean01_timings = {
+       .pixelclock = { 90000000, 108000000, 135000000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 126, 184, 1266 },
+       .hback_porch = { 84, 122, 844 },
+       .hsync_len = { 70, 102, 704 },
+       .vactive = { 1024, 1024, 1024 },
+       .vfront_porch = { 4, 26, 76 },
+       .vback_porch = { 2, 8, 25 },
+       .vsync_len = { 2, 8, 25 },
+};
+
+static const struct panel_desc auo_g190ean01 = {
+       .timings = &auo_g190ean01_timings,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 376,
+               .height = 301,
+       },
+       .delay = {
+               .prepare = 50,
+               .enable = 200,
+               .disable = 110,
+               .unprepare = 1000,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_p320hvn03_timings = {
        .pixelclock = { 106000000, 148500000, 164000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -1092,6 +1226,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
        },
 };
 
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+       .clock = 147840,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 48,
+       .hsync_end = 1920 + 48 + 32,
+       .htotal = 1920 + 48 + 32 + 200,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 6,
+       .vtotal = 1080 + 3 + 6 + 31,
+       .vrefresh = 60,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+       .modes = &boe_nv133fhm_n61_modes,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 294,
+               .height = 165,
+       },
+       .delay = {
+               .hpd_absent_delay = 200,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
        {
                .clock = 148500,
@@ -1980,6 +2146,37 @@ static const struct panel_desc innolux_zj070na_01p = {
        },
 };
 
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+       .clock = 138778,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 24,
+       .hsync_end = 1920 + 24 + 48,
+       .htotal = 1920 + 24 + 48 + 88,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 12,
+       .vtotal = 1080 + 3 + 12 + 17,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+       .modes = &ivo_m133nwf4_r0_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 294,
+               .height = 165,
+       },
+       .delay = {
+               .hpd_absent_delay = 200,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct display_timing koe_tx14d24vm1bpa_timing = {
        .pixelclock = { 5580000, 5850000, 6200000 },
        .hactive = { 320, 320, 320 },
@@ -2168,6 +2365,7 @@ static const struct panel_desc lg_lp120up1 = {
                .width = 267,
                .height = 183,
        },
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
 };
 
 static const struct drm_display_mode lg_lp129qe_mode = {
@@ -3065,6 +3263,32 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode starry_kr070pe2t_mode = {
+       .clock = 33000,
+       .hdisplay = 800,
+       .hsync_start = 800 + 209,
+       .hsync_end = 800 + 209 + 1,
+       .htotal = 800 + 209 + 1 + 45,
+       .vdisplay = 480,
+       .vsync_start = 480 + 22,
+       .vsync_end = 480 + 22 + 1,
+       .vtotal = 480 + 22 + 1 + 22,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc starry_kr070pe2t = {
+       .modes = &starry_kr070pe2t_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 86,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode starry_kr122ea0sra_mode = {
        .clock = 147000,
        .hdisplay = 1920,
@@ -3454,12 +3678,21 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "auo,g104sn02",
                .data = &auo_g104sn02,
+       }, {
+               .compatible = "auo,g121ean01",
+               .data = &auo_g121ean01,
        }, {
                .compatible = "auo,g133han01",
                .data = &auo_g133han01,
+       }, {
+               .compatible = "auo,g156xtn01",
+               .data = &auo_g156xtn01,
        }, {
                .compatible = "auo,g185han01",
                .data = &auo_g185han01,
+       }, {
+               .compatible = "auo,g190ean01",
+               .data = &auo_g190ean01,
        }, {
                .compatible = "auo,p320hvn03",
                .data = &auo_p320hvn03,
@@ -3478,6 +3711,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "boe,nv101wxmn51",
                .data = &boe_nv101wxmn51,
+       }, {
+               .compatible = "boe,nv133fhm-n61",
+               .data = &boe_nv133fhm_n61,
+       }, {
+               .compatible = "boe,nv133fhm-n62",
+               .data = &boe_nv133fhm_n61,
        }, {
                .compatible = "boe,nv140fhmn49",
                .data = &boe_nv140fhmn49,
@@ -3586,6 +3825,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "innolux,zj070na-01p",
                .data = &innolux_zj070na_01p,
+       }, {
+               .compatible = "ivo,m133nwf4-r0",
+               .data = &ivo_m133nwf4_r0,
        }, {
                .compatible = "koe,tx14d24vm1bpa",
                .data = &koe_tx14d24vm1bpa,
@@ -3715,6 +3957,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "shelly,sca07010-bfn-lnn",
                .data = &shelly_sca07010_bfn_lnn,
+       }, {
+               .compatible = "starry,kr070pe2t",
+               .data = &starry_kr070pe2t,
        }, {
                .compatible = "starry,kr122ea0sra",
                .data = &starry_kr122ea0sra,
index 012ca62bf30e6939966a465f2bad9142b3adbc7d..f0ad6081570f570c548cbcea7a5290c91b1cd052 100644 (file)
@@ -490,9 +490,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
 {
        struct device *dev = ctx->dev;
        int ret, i;
-       const struct nt35597_config *config;
 
-       config = ctx->config;
        for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
                ctx->supplies[i].supply = regulator_names[i];
 
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
new file mode 100644 (file)
index 0000000..42f299a
--- /dev/null
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Per-panel driver state, embedding the DRM panel object. */
+struct visionox_rm69299 {
+       struct drm_panel panel;
+       /* "vdda" and "vdd3p3" regulators, in this order (set up in probe) */
+       struct regulator_bulk_data supplies[2];
+       struct gpio_desc *reset_gpio;
+       struct mipi_dsi_device *dsi;
+       bool prepared;  /* set once the DCS init sequence has completed */
+       bool enabled;
+};
+
+/* Map a drm_panel pointer back to its containing visionox_rm69299. */
+static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
+{
+       return container_of(panel, struct visionox_rm69299, panel);
+}
+
+/*
+ * Enable both supplies and run the panel's reset pulse sequence.
+ * Returns 0 on success or the regulator_bulk_enable() error.
+ */
+static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
+{
+       int ret;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Reset sequence of visionox panel requires the panel to be
+        * out of reset for 10ms, followed by being held in reset
+        * for 10ms and then out again
+        */
+       gpiod_set_value(ctx->reset_gpio, 1);
+       usleep_range(10000, 20000);
+       gpiod_set_value(ctx->reset_gpio, 0);
+       usleep_range(10000, 20000);
+       gpiod_set_value(ctx->reset_gpio, 1);
+       usleep_range(10000, 20000);
+
+       return 0;
+}
+
+/*
+ * Hold the panel in reset (low, per the sequence in power_on) and
+ * disable both supplies; returns the regulator_bulk_disable() status.
+ */
+static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
+{
+       gpiod_set_value(ctx->reset_gpio, 0);
+
+       return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+/*
+ * Panel teardown: display off, enter sleep mode, then power off.
+ * DCS command failures are only logged so that power is always
+ * removed; the power-off status is what gets returned.
+ */
+static int visionox_rm69299_unprepare(struct drm_panel *panel)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       int ret;
+
+       /* Clear LPM (and any other transfer flags) for the teardown writes. */
+       ctx->dsi->mode_flags = 0;
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+       if (ret < 0)
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "set_display_off cmd failed ret = %d\n", ret);
+
+       /* 120ms delay required here as per DCS spec */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "enter_sleep cmd failed ret = %d\n", ret);
+       }
+
+       ret = visionox_rm69299_power_off(ctx);
+
+       ctx->prepared = false;
+       return ret;
+}
+
+/*
+ * Power on the panel and send its DCS init sequence.
+ *
+ * On any command failure the panel is powered back off so the
+ * regulators and the reset GPIO do not stay enabled.  Returns 0 on
+ * success or a negative errno.
+ */
+static int visionox_rm69299_prepare(struct drm_panel *panel)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       ret = visionox_rm69299_power_on(ctx);
+       if (ret < 0)
+               return ret;
+
+       /* Init commands are sent in low-power mode. */
+       ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 0 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 1 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 2 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "cmd set tx 3 failed, ret = %d\n", ret);
+               goto power_off;
+       }
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "exit_sleep_mode cmd failed ret = %d\n", ret);
+               goto power_off;
+       }
+
+       /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+       msleep(120);
+
+       ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+       if (ret < 0) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "set_display_on cmd failed ret = %d\n", ret);
+               goto power_off;
+       }
+
+       /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+       msleep(120);
+
+       ctx->prepared = true;
+
+       return 0;
+
+power_off:
+       /* Undo visionox_rm69299_power_on() so supplies/reset don't leak. */
+       if (visionox_rm69299_power_off(ctx) < 0)
+               DRM_DEV_ERROR(ctx->panel.dev, "power_off failed\n");
+       return ret;
+}
+
+/*
+ * Fixed native timing: 1080x2248 @ 60 Hz, pixel clock in kHz.
+ * h/v fields are active + front porch + sync + back porch.
+ */
+static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
+       .name = "1080x2248",
+       .clock = 158695,
+       .hdisplay = 1080,
+       .hsync_start = 1080 + 26,
+       .hsync_end = 1080 + 26 + 2,
+       .htotal = 1080 + 26 + 2 + 36,
+       .vdisplay = 2248,
+       .vsync_start = 2248 + 56,
+       .vsync_end = 2248 + 56 + 4,
+       .vtotal = 2248 + 56 + 4 + 4,
+       .vrefresh = 60,
+       .flags = 0,
+};
+
+/*
+ * Report the single fixed 1080x2248@60 mode and the physical size.
+ * Returns the number of modes added (1), or 0 on allocation failure.
+ */
+static int visionox_rm69299_get_modes(struct drm_panel *panel,
+                                     struct drm_connector *connector)
+{
+       struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_create(connector->dev);
+       if (!mode) {
+               DRM_DEV_ERROR(ctx->panel.dev,
+                             "failed to create a new display mode\n");
+               return 0;
+       }
+
+       /* Physical active area in millimetres */
+       connector->display_info.width_mm = 74;
+       connector->display_info.height_mm = 131;
+       drm_mode_copy(mode, &visionox_rm69299_1080x2248_60hz);
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+
+       return 1;
+}
+
+/* drm_panel hooks; no separate enable/disable steps are implemented. */
+static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
+       .unprepare = visionox_rm69299_unprepare,
+       .prepare = visionox_rm69299_prepare,
+       .get_modes = visionox_rm69299_get_modes,
+};
+
+/*
+ * Probe: acquire regulators and the reset GPIO, register the DRM
+ * panel, attach to the DSI host and configure supply loads.  Failures
+ * after registration unwind the DSI attach and panel registration.
+ */
+static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct visionox_rm69299 *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       ctx->panel.dev = dev;
+       ctx->dsi = dsi;
+
+       ctx->supplies[0].supply = "vdda";
+       ctx->supplies[1].supply = "vdd3p3";
+
+       ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
+                                     ctx->supplies);
+       if (ret < 0)
+               return ret;
+
+       ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
+                                        "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio)) {
+               DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+                             PTR_ERR(ctx->reset_gpio));
+               return PTR_ERR(ctx->reset_gpio);
+       }
+
+       /* drm_panel_init() sets panel.dev and panel.funcs for us. */
+       drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+       drm_panel_add(&ctx->panel);
+
+       /* 4-lane RGB888 video-mode link, LP commands, non-continuous clock */
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+                         MIPI_DSI_CLOCK_NON_CONTINUOUS;
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+               goto err_dsi_attach;
+       }
+
+       ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
+       if (ret) {
+               DRM_DEV_ERROR(dev,
+                             "regulator set load failed for vdda supply ret = %d\n",
+                             ret);
+               goto err_set_load;
+       }
+
+       ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
+       if (ret) {
+               DRM_DEV_ERROR(dev,
+                             "regulator set load failed for vdd3p3 supply ret = %d\n",
+                             ret);
+               goto err_set_load;
+       }
+
+       return 0;
+
+err_set_load:
+       mipi_dsi_detach(dsi);
+err_dsi_attach:
+       drm_panel_remove(&ctx->panel);
+       return ret;
+}
+
+/*
+ * Remove: detach from the DSI host and unregister the panel.
+ *
+ * The mipi_dsi_device is owned by the DSI host -- probe received it
+ * from the framework and never registered it -- so this driver must
+ * not call mipi_dsi_device_unregister() on it.
+ */
+static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+{
+       struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+       mipi_dsi_detach(ctx->dsi);
+       drm_panel_remove(&ctx->panel);
+       return 0;
+}
+
+/* Device-tree match table */
+static const struct of_device_id visionox_rm69299_of_match[] = {
+       { .compatible = "visionox,rm69299-1080p-display", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
+
+/* DSI driver glue; module init/exit via module_mipi_dsi_driver(). */
+static struct mipi_dsi_driver visionox_rm69299_driver = {
+       .driver = {
+               .name = "panel-visionox-rm69299",
+               .of_match_table = visionox_rm69299_of_match,
+       },
+       .probe = visionox_rm69299_probe,
+       .remove = visionox_rm69299_remove,
+};
+module_mipi_dsi_driver(visionox_rm69299_driver);
+
+MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
index 0c70f0e91d218aad577dfe02ff62efc91cd89edd..67d430d433e0c1f5a27143e2cfde1f9364a1ed6c 100644 (file)
@@ -3,7 +3,6 @@ pl111_drm-y +=  pl111_display.o \
                pl111_versatile.o \
                pl111_drv.o
 
-pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
 pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
 pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
 
index 3c8e820168543755698bbbbdae7cbee634177dd1..26ca8cdf3e6064093a7fe77a3bdef70ed5f1616f 100644 (file)
@@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = {
        {"regs", pl111_debugfs_regs, 0},
 };
 
-int
+void
 pl111_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(pl111_debugfs_list,
-                                       ARRAY_SIZE(pl111_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(pl111_debugfs_list,
+                                ARRAY_SIZE(pl111_debugfs_list),
+                                minor->debugfs_root, minor);
 }
index 77d2da9a8a7ca829e1672aa386f08e9fb0a4ef18..ba399bcb792f4fa233766ea5263c4ef44a6171ff 100644 (file)
@@ -84,6 +84,6 @@ struct pl111_drm_dev_private {
 
 int pl111_display_init(struct drm_device *dev);
 irqreturn_t pl111_irq(int irq, void *data);
-int pl111_debugfs_init(struct drm_minor *minor);
+void pl111_debugfs_init(struct drm_minor *minor);
 
 #endif /* _PL111_DRM_H_ */
index aa8aa8d9e405a4cf92bdd831eb2cf0a39fd64805..da0c39dae874779fd17e633ffd69c00bc363eb81 100644 (file)
@@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev)
        struct drm_panel *panel = NULL;
        struct drm_bridge *bridge = NULL;
        bool defer = false;
-       int ret = 0;
+       int ret;
        int i;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
+
        mode_config = &dev->mode_config;
        mode_config->funcs = &mode_config_funcs;
        mode_config->min_width = 1;
@@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev)
                                                    DRM_MODE_CONNECTOR_Unknown);
                if (IS_ERR(bridge)) {
                        ret = PTR_ERR(bridge);
-                       goto out_config;
+                       goto finish;
                }
        } else if (bridge) {
                dev_info(dev->dev, "Using non-panel bridge\n");
@@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev)
 out_bridge:
        if (panel)
                drm_panel_bridge_remove(bridge);
-out_config:
-       drm_mode_config_cleanup(dev);
 finish:
        return ret;
 }
@@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
        drm_dev_unregister(drm);
        if (priv->panel)
                drm_panel_bridge_remove(priv->bridge);
-       drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 
@@ -444,6 +444,7 @@ static const struct amba_id pl111_id_table[] = {
        },
        {0, 0},
 };
+MODULE_DEVICE_TABLE(amba, pl111_id_table);
 
 static struct amba_driver pl111_amba_driver __maybe_unused = {
        .drv = {
index 4f325c410b5d1cab92f155f173ce2b5f2fc51d7a..64f01a4e6767fa6ce9bbb33314edbbbd8768f42d 100644 (file)
@@ -8,9 +8,9 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/regmap.h>
+#include <linux/vexpress.h>
 
 #include "pl111_versatile.h"
-#include "pl111_vexpress.h"
 #include "pl111_drm.h"
 
 static struct regmap *versatile_syscon_map;
@@ -361,13 +361,110 @@ static const struct pl111_variant_data pl111_vexpress = {
        .broken_clockdivider = true,
 };
 
+/* Mux selector values for the V2M motherboard "muxfpga" DVI switch. */
+#define VEXPRESS_FPGAMUX_MOTHERBOARD           0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1       0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2       0x02
+
+/*
+ * Decide which CLCD/HDLCD instance gets muxed onto the Versatile
+ * Express DVI output and program the muxfpga through the vexpress
+ * config regmap.  On success selects the pl111_vexpress variant and
+ * returns 0; returns -ENODEV if this CLCD should stay deactivated,
+ * -EPROBE_DEFER if the sysreg device is not available yet, or another
+ * negative errno on failure.
+ */
+static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
+                                   struct pl111_drm_dev_private *priv)
+{
+       struct platform_device *pdev;
+       struct device_node *root;
+       struct device_node *child;
+       struct device_node *ct_clcd = NULL;
+       struct regmap *map;
+       bool has_coretile_clcd = false;
+       bool has_coretile_hdlcd = false;
+       bool mux_motherboard = true;
+       u32 val;
+       int ret;
+
+       /* The muxfpga is only reachable through the vexpress config bus. */
+       if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
+               return -ENODEV;
+
+       /*
+        * Check if we have a CLCD or HDLCD on the core tile by checking if a
+        * CLCD or HDLCD is available in the root of the device tree.
+        */
+       root = of_find_node_by_path("/");
+       if (!root)
+               return -EINVAL;
+
+       for_each_available_child_of_node(root, child) {
+               if (of_device_is_compatible(child, "arm,pl111")) {
+                       /*
+                        * NOTE(review): the iterator's reference on 'child'
+                        * is kept in ct_clcd and never dropped -- possible
+                        * of_node refcount leak; confirm.
+                        */
+                       has_coretile_clcd = true;
+                       ct_clcd = child;
+                       break;
+               }
+               if (of_device_is_compatible(child, "arm,hdlcd")) {
+                       has_coretile_hdlcd = true;
+                       of_node_put(child);
+                       break;
+               }
+       }
+
+       of_node_put(root);
+
+       /*
+        * If there is a coretile HDLCD and it has a driver,
+        * do not mux the CLCD on the motherboard to the DVI.
+        */
+       if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+               mux_motherboard = false;
+
+       /*
+        * On the Vexpress CA9 we let the CLCD on the coretile
+        * take precedence, so also in this case do not mux the
+        * motherboard to the DVI.
+        */
+       if (has_coretile_clcd)
+               mux_motherboard = false;
+
+       if (mux_motherboard) {
+               dev_info(dev, "DVI muxed to motherboard CLCD\n");
+               val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+       } else if (ct_clcd == dev->of_node) {
+               dev_info(dev,
+                        "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+               val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+       } else {
+               dev_info(dev, "core tile graphics present\n");
+               dev_info(dev, "this device will be deactivated\n");
+               return -ENODEV;
+       }
+
+       /* Call into deep Vexpress configuration API */
+       pdev = of_find_device_by_node(np);
+       if (!pdev) {
+               dev_err(dev, "can't find the sysreg device, deferring\n");
+               return -EPROBE_DEFER;
+       }
+
+       map = devm_regmap_init_vexpress_config(&pdev->dev);
+       if (IS_ERR(map)) {
+               platform_device_put(pdev);
+               return PTR_ERR(map);
+       }
+
+       /* Register 0 of the muxfpga selects the DVI source. */
+       ret = regmap_write(map, 0, val);
+       platform_device_put(pdev);
+       if (ret) {
+               dev_err(dev, "error setting DVI muxmode\n");
+               return -ENODEV;
+       }
+
+       priv->variant = &pl111_vexpress;
+       dev_info(dev, "initializing Versatile Express PL111\n");
+
+       return 0;
+}
+
 int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 {
        const struct of_device_id *clcd_id;
        enum versatile_clcd versatile_clcd_type;
        struct device_node *np;
        struct regmap *map;
-       int ret;
 
        np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
                                             &clcd_id);
@@ -378,6 +475,15 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 
        versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
 
+       /* Versatile Express special handling */
+       if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+               int ret = pl111_vexpress_clcd_init(dev, np, priv);
+               of_node_put(np);
+               if (ret)
+                       dev_err(dev, "Versatile Express init failed - %d", ret);
+               return ret;
+       }
+
        /*
         * On the Integrator, check if we should use the IM-PD1 instead,
         * if we find it, it will take precedence. This is on the Integrator/AP
@@ -390,37 +496,8 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
                        versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
        }
 
-       /* Versatile Express special handling */
-       if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
-               struct platform_device *pdev;
-
-               /* Registers a driver for the muxfpga */
-               ret = vexpress_muxfpga_init();
-               if (ret) {
-                       dev_err(dev, "unable to initialize muxfpga driver\n");
-                       of_node_put(np);
-                       return ret;
-               }
-
-               /* Call into deep Vexpress configuration API */
-               pdev = of_find_device_by_node(np);
-               if (!pdev) {
-                       dev_err(dev, "can't find the sysreg device, deferring\n");
-                       of_node_put(np);
-                       return -EPROBE_DEFER;
-               }
-               map = dev_get_drvdata(&pdev->dev);
-               if (!map) {
-                       dev_err(dev, "sysreg has not yet probed\n");
-                       platform_device_put(pdev);
-                       of_node_put(np);
-                       return -EPROBE_DEFER;
-               }
-       } else {
-               map = syscon_node_to_regmap(np);
-       }
+       map = syscon_node_to_regmap(np);
        of_node_put(np);
-
        if (IS_ERR(map)) {
                dev_err(dev, "no Versatile syscon regmap\n");
                return PTR_ERR(map);
@@ -466,13 +543,6 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
                priv->variant_display_disable = pl111_realview_clcd_disable;
                dev_info(dev, "set up callbacks for RealView PL111\n");
                break;
-       case VEXPRESS_CLCD_V2M:
-               priv->variant = &pl111_vexpress;
-               dev_info(dev, "initializing Versatile Express PL111\n");
-               ret = pl111_vexpress_clcd_init(dev, priv, map);
-               if (ret)
-                       return ret;
-               break;
        default:
                dev_info(dev, "unknown Versatile system controller\n");
                break;
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
deleted file mode 100644 (file)
index 350570f..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Versatile Express PL111 handling
- * Copyright (C) 2018 Linus Walleij
- *
- * This module binds to the "arm,vexpress-muxfpga" device on the
- * Versatile Express configuration bus and sets up which CLCD instance
- * gets muxed out on the DVI bridge.
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/vexpress.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include "pl111_drm.h"
-#include "pl111_vexpress.h"
-
-#define VEXPRESS_FPGAMUX_MOTHERBOARD           0x00
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1       0x01
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2       0x02
-
-int pl111_vexpress_clcd_init(struct device *dev,
-                            struct pl111_drm_dev_private *priv,
-                            struct regmap *map)
-{
-       struct device_node *root;
-       struct device_node *child;
-       struct device_node *ct_clcd = NULL;
-       bool has_coretile_clcd = false;
-       bool has_coretile_hdlcd = false;
-       bool mux_motherboard = true;
-       u32 val;
-       int ret;
-
-       /*
-        * Check if we have a CLCD or HDLCD on the core tile by checking if a
-        * CLCD or HDLCD is available in the root of the device tree.
-        */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return -EINVAL;
-
-       for_each_available_child_of_node(root, child) {
-               if (of_device_is_compatible(child, "arm,pl111")) {
-                       has_coretile_clcd = true;
-                       ct_clcd = child;
-                       break;
-               }
-               if (of_device_is_compatible(child, "arm,hdlcd")) {
-                       has_coretile_hdlcd = true;
-                       of_node_put(child);
-                       break;
-               }
-       }
-
-       of_node_put(root);
-
-       /*
-        * If there is a coretile HDLCD and it has a driver,
-        * do not mux the CLCD on the motherboard to the DVI.
-        */
-       if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
-               mux_motherboard = false;
-
-       /*
-        * On the Vexpress CA9 we let the CLCD on the coretile
-        * take precedence, so also in this case do not mux the
-        * motherboard to the DVI.
-        */
-       if (has_coretile_clcd)
-               mux_motherboard = false;
-
-       if (mux_motherboard) {
-               dev_info(dev, "DVI muxed to motherboard CLCD\n");
-               val = VEXPRESS_FPGAMUX_MOTHERBOARD;
-       } else if (ct_clcd == dev->of_node) {
-               dev_info(dev,
-                        "DVI muxed to daughterboard 1 (core tile) CLCD\n");
-               val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
-       } else {
-               dev_info(dev, "core tile graphics present\n");
-               dev_info(dev, "this device will be deactivated\n");
-               return -ENODEV;
-       }
-
-       ret = regmap_write(map, 0, val);
-       if (ret) {
-               dev_err(dev, "error setting DVI muxmode\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/*
- * This sets up the regmap pointer that will then be retrieved by
- * the detection code in pl111_versatile.c and passed in to the
- * pl111_vexpress_clcd_init() function above.
- */
-static int vexpress_muxfpga_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct regmap *map;
-
-       map = devm_regmap_init_vexpress_config(&pdev->dev);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-       dev_set_drvdata(dev, map);
-
-       return 0;
-}
-
-static const struct of_device_id vexpress_muxfpga_match[] = {
-       { .compatible = "arm,vexpress-muxfpga", },
-       {}
-};
-
-static struct platform_driver vexpress_muxfpga_driver = {
-       .driver = {
-               .name = "vexpress-muxfpga",
-               .of_match_table = of_match_ptr(vexpress_muxfpga_match),
-       },
-       .probe = vexpress_muxfpga_probe,
-};
-
-int vexpress_muxfpga_init(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&vexpress_muxfpga_driver);
-       /* -EBUSY just means this driver is already registered */
-       if (ret == -EBUSY)
-               ret = 0;
-       return ret;
-}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
deleted file mode 100644 (file)
index 5d3681b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-struct device;
-struct pl111_drm_dev_private;
-struct regmap;
-
-#ifdef CONFIG_ARCH_VEXPRESS
-
-int pl111_vexpress_clcd_init(struct device *dev,
-                            struct pl111_drm_dev_private *priv,
-                            struct regmap *map);
-
-int vexpress_muxfpga_init(void);
-
-#else
-
-static inline int pl111_vexpress_clcd_init(struct device *dev,
-                                          struct pl111_drm_dev_private *priv,
-                                          struct regmap *map)
-{
-       return -ENODEV;
-}
-
-static inline int vexpress_muxfpga_init(void)
-{
-       return 0;
-}
-
-#endif
index a4f4175bbdbeda019eac936187c87c884cbd1539..524d35b648d885045eecfb25bec0bbb7f8a5b569 100644 (file)
@@ -39,7 +39,7 @@ static int
 qxl_debugfs_irq_received(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(node->minor->dev);
 
        seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
        seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
@@ -53,7 +53,7 @@ static int
 qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(node->minor->dev);
        struct qxl_bo *bo;
 
        list_for_each_entry(bo, &qdev->gem.objects, list) {
@@ -79,36 +79,29 @@ static struct drm_info_list qxl_debugfs_list[] = {
 #define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
 #endif
 
-int
+void
 qxl_debugfs_init(struct drm_minor *minor)
 {
 #if defined(CONFIG_DEBUG_FS)
-       int r;
-       struct qxl_device *dev =
-               (struct qxl_device *) minor->dev->dev_private;
+       struct qxl_device *dev = to_qxl(minor->dev);
 
        drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
                                 minor->debugfs_root, minor);
 
-       r = qxl_ttm_debugfs_init(dev);
-       if (r) {
-               DRM_ERROR("Failed to init TTM debugfs\n");
-               return r;
-       }
+       qxl_ttm_debugfs_init(dev);
 #endif
-       return 0;
 }
 
-int qxl_debugfs_add_files(struct qxl_device *qdev,
-                         struct drm_info_list *files,
-                         unsigned int nfiles)
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+                          struct drm_info_list *files,
+                          unsigned int nfiles)
 {
        unsigned int i;
 
        for (i = 0; i < qdev->debugfs_count; i++) {
                if (qdev->debugfs[i].files == files) {
                        /* Already registered */
-                       return 0;
+                       return;
                }
        }
 
@@ -116,7 +109,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
        if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
                DRM_ERROR("Reached maximum number of debugfs components.\n");
                DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
-               return -EINVAL;
+               return;
        }
        qdev->debugfs[qdev->debugfs_count].files = files;
        qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
@@ -126,5 +119,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
                                 qdev->ddev.primary->debugfs_root,
                                 qdev->ddev.primary);
 #endif
-       return 0;
 }
index 91f398d51cfadf4e69fd60125147d2d35eb905d1..9d45d5a4278f462db9b0b21a623c8c6bfa478a96 100644 (file)
@@ -221,7 +221,7 @@ static int qxl_add_mode(struct drm_connector *connector,
                        bool preferred)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_display_mode *mode = NULL;
        int rc;
 
@@ -242,7 +242,7 @@ static int qxl_add_mode(struct drm_connector *connector,
 static int qxl_add_monitors_config_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *output = drm_connector_to_qxl_output(connector);
        int h = output->index;
        struct qxl_head *head;
@@ -310,7 +310,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
                                            const char *reason)
 {
        struct drm_device *dev = crtc->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
        struct qxl_head head;
        int oldcount, i = qcrtc->index;
@@ -400,7 +400,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
                                         unsigned int num_clips)
 {
        /* TODO: vmwgfx where this was cribbed from had locking. Why? */
-       struct qxl_device *qdev = fb->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(fb->dev);
        struct drm_clip_rect norect;
        struct qxl_bo *qobj;
        bool is_primary;
@@ -462,7 +462,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
 static int qxl_primary_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_bo *bo;
 
        if (!state->crtc || !state->fb)
@@ -476,7 +476,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
 static int qxl_primary_apply_cursor(struct drm_plane *plane)
 {
        struct drm_device *dev = plane->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_framebuffer *fb = plane->state->fb;
        struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
        struct qxl_cursor_cmd *cmd;
@@ -523,7 +523,7 @@ out_free_release:
 static void qxl_primary_atomic_update(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
        struct qxl_bo *primary;
        struct drm_clip_rect norect = {
@@ -554,7 +554,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
 static void qxl_primary_atomic_disable(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
 
        if (old_state->fb) {
                struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -570,7 +570,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_framebuffer *fb = plane->state->fb;
        struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
        struct qxl_release *release;
@@ -679,7 +679,7 @@ out_free_release:
 static void qxl_cursor_atomic_disable(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct qxl_release *release;
        struct qxl_cursor_cmd *cmd;
        int ret;
@@ -762,7 +762,7 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
 static int qxl_plane_prepare_fb(struct drm_plane *plane,
                                struct drm_plane_state *new_state)
 {
-       struct qxl_device *qdev = plane->dev->dev_private;
+       struct qxl_device *qdev = to_qxl(plane->dev);
        struct drm_gem_object *obj;
        struct qxl_bo *user_bo;
        struct qxl_surface surf;
@@ -923,7 +923,7 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
 {
        struct qxl_crtc *qxl_crtc;
        struct drm_plane *primary, *cursor;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        int r;
 
        qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
@@ -965,7 +965,7 @@ free_mem:
 static int qxl_conn_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *output = drm_connector_to_qxl_output(connector);
        unsigned int pwidth = 1024;
        unsigned int pheight = 768;
@@ -991,7 +991,7 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
        struct drm_device *ddev = connector->dev;
-       struct qxl_device *qdev = ddev->dev_private;
+       struct qxl_device *qdev = to_qxl(ddev);
 
        if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
                return MODE_BAD;
@@ -1021,7 +1021,7 @@ static enum drm_connector_status qxl_conn_detect(
        struct qxl_output *output =
                drm_connector_to_qxl_output(connector);
        struct drm_device *ddev = connector->dev;
-       struct qxl_device *qdev = ddev->dev_private;
+       struct qxl_device *qdev = to_qxl(ddev);
        bool connected = false;
 
        /* The first monitor is always connected */
@@ -1071,7 +1071,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
 
 static int qdev_output_init(struct drm_device *dev, int num_output)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_output *qxl_output;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
index 4fda3f9b29f464eb926ee97e2865df12e7affa19..13872b882775d00c2ffdae0547418625efb36d4f 100644 (file)
@@ -81,13 +81,16 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -EINVAL; /* TODO: ENODEV ? */
        }
 
-       qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
-       if (!qdev)
+       qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
+                                 struct qxl_device, ddev);
+       if (IS_ERR(qdev)) {
+               pr_err("Unable to init drm dev");
                return -ENOMEM;
+       }
 
        ret = pci_enable_device(pdev);
        if (ret)
-               goto free_dev;
+               return ret;
 
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
        if (ret)
@@ -101,7 +104,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
-       ret = qxl_device_init(qdev, &qxl_driver, pdev);
+       ret = qxl_device_init(qdev, pdev);
        if (ret)
                goto put_vga;
 
@@ -128,14 +131,13 @@ put_vga:
                vga_put(pdev, VGA_RSRC_LEGACY_IO);
 disable_pci:
        pci_disable_device(pdev);
-free_dev:
-       kfree(qdev);
+
        return ret;
 }
 
 static void qxl_drm_release(struct drm_device *dev)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
 
        /*
         * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
@@ -144,8 +146,6 @@ static void qxl_drm_release(struct drm_device *dev)
         */
        qxl_modeset_fini(qdev);
        qxl_device_fini(qdev);
-       dev->dev_private = NULL;
-       kfree(qdev);
 }
 
 static void
@@ -157,7 +157,6 @@ qxl_pci_remove(struct pci_dev *pdev)
        drm_atomic_helper_shutdown(dev);
        if (is_vga(pdev))
                vga_put(pdev, VGA_RSRC_LEGACY_IO);
-       drm_dev_put(dev);
 }
 
 DEFINE_DRM_GEM_FOPS(qxl_fops);
@@ -165,7 +164,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
 static int qxl_drm_freeze(struct drm_device *dev)
 {
        struct pci_dev *pdev = dev->pdev;
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        int ret;
 
        ret = drm_mode_config_helper_suspend(dev);
@@ -187,7 +186,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
 
 static int qxl_drm_resume(struct drm_device *dev, bool thaw)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
 
        qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
        if (!thaw) {
@@ -246,7 +245,7 @@ static int qxl_pm_restore(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct qxl_device *qdev = drm_dev->dev_private;
+       struct qxl_device *qdev = to_qxl(drm_dev);
 
        qxl_io_reset(qdev);
        return qxl_drm_resume(drm_dev, false);
index 27e45a2d6b52cdba60cc41dc0c753671e920b823..31e35f787df2c2a7bf23f54de16f7ac60b36a4b5 100644 (file)
@@ -190,13 +190,8 @@ struct qxl_debugfs {
        unsigned int num_files;
 };
 
-int qxl_debugfs_add_files(struct qxl_device *rdev,
-                            struct drm_info_list *files,
-                            unsigned int nfiles);
 int qxl_debugfs_fence_init(struct qxl_device *rdev);
 
-struct qxl_device;
-
 struct qxl_device {
        struct drm_device ddev;
 
@@ -276,11 +271,12 @@ struct qxl_device {
        int monitors_config_height;
 };
 
+#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
+
 extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
-int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
-                   struct pci_dev *pdev);
+int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
 void qxl_device_fini(struct qxl_device *qdev);
 
 int qxl_modeset_init(struct qxl_device *qdev);
@@ -442,8 +438,8 @@ int qxl_garbage_collect(struct qxl_device *qdev);
 
 /* debugfs */
 
-int qxl_debugfs_init(struct drm_minor *minor);
-int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+void qxl_debugfs_init(struct drm_minor *minor);
+void qxl_ttm_debugfs_init(struct qxl_device *qdev);
 
 /* qxl_prime.c */
 int qxl_gem_prime_pin(struct drm_gem_object *obj);
@@ -461,9 +457,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
 int qxl_irq_init(struct qxl_device *qdev);
 irqreturn_t qxl_irq_handler(int irq, void *arg);
 
-int qxl_debugfs_add_files(struct qxl_device *qdev,
-                         struct drm_info_list *files,
-                         unsigned int nfiles);
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+                          struct drm_info_list *files,
+                          unsigned int nfiles);
 
 int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf);
index 272d19b677d8f9a10ed118915d2c65b7dcc15696..24e903383aa17c22231a1b5e326a327f86c896f3 100644 (file)
@@ -32,7 +32,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct qxl_bo *qobj;
        uint32_t handle;
        int r;
index 69f37db1027ad7852f7dbfefaf4e33b134c550ac..5ff6fa9b799c5a29122038f9a95106a2fea23a1c 100644 (file)
@@ -34,7 +34,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
        struct qxl_device *qdev;
        struct ttm_buffer_object *tbo;
 
-       qdev = (struct qxl_device *)gobj->dev->dev_private;
+       qdev = to_qxl(gobj->dev);
 
        qxl_surface_evict(qdev, qobj, false);
 
index 72f3f1bbb40c1de0cf3f6a77047530fa2e47ef85..13bd1d11c7036c85c56877475c1918cfe63c726a 100644 (file)
@@ -36,7 +36,7 @@
 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_alloc *qxl_alloc = data;
        int ret;
        struct qxl_bo *qobj;
@@ -64,7 +64,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
 static int qxl_map_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_map *qxl_map = data;
 
        return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
@@ -276,7 +276,7 @@ out_free_reloc:
 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_execbuffer *execbuffer = data;
        struct drm_qxl_command user_cmd;
        int cmd_num;
@@ -301,7 +301,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_update_area *update_area = data;
        struct qxl_rect area = {.left = update_area->left,
                                .top = update_area->top,
@@ -351,7 +351,7 @@ out:
 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_getparam *param = data;
 
        switch (param->param) {
@@ -370,7 +370,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_clientcap *param = data;
        int byte, idx;
 
@@ -391,7 +391,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
 {
-       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        struct drm_qxl_alloc_surf *param = data;
        struct qxl_bo *qobj;
        int handle;
index 8435af108632c8a7a4ba04c472072f4a5ac486ca..1ba5a702d7636d862756b9b3cb2705ca16021d40 100644 (file)
@@ -32,7 +32,7 @@
 irqreturn_t qxl_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
-       struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+       struct qxl_device *qdev = to_qxl(dev);
        uint32_t pending;
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
index 70b20ee4741ae0b6f6522207b7b8136561486c24..a6d873052cd409729dfadbeaaf0a104999b72395 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 
 #include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "qxl_drv.h"
@@ -107,20 +108,12 @@ static void qxl_gc_work(struct work_struct *work)
 }
 
 int qxl_device_init(struct qxl_device *qdev,
-                   struct drm_driver *drv,
                    struct pci_dev *pdev)
 {
        int r, sb;
 
-       r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
-       if (r) {
-               pr_err("Unable to init drm dev");
-               goto error;
-       }
-
        qdev->ddev.pdev = pdev;
        pci_set_drvdata(pdev, &qdev->ddev);
-       qdev->ddev.dev_private = qdev;
 
        mutex_init(&qdev->gem.mutex);
        mutex_init(&qdev->update_area_mutex);
@@ -136,8 +129,7 @@ int qxl_device_init(struct qxl_device *qdev,
        qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
        if (!qdev->vram_mapping) {
                pr_err("Unable to create vram_mapping");
-               r = -ENOMEM;
-               goto error;
+               return -ENOMEM;
        }
 
        if (pci_resource_len(pdev, 4) > 0) {
@@ -218,7 +210,7 @@ int qxl_device_init(struct qxl_device *qdev,
                                &(qdev->ram_header->cursor_ring_hdr),
                                sizeof(struct qxl_command),
                                QXL_CURSOR_RING_SIZE,
-                               qdev->io_base + QXL_IO_NOTIFY_CMD,
+                               qdev->io_base + QXL_IO_NOTIFY_CURSOR,
                                false,
                                &qdev->cursor_event);
 
@@ -291,7 +283,6 @@ surface_mapping_free:
        io_mapping_free(qdev->surface_mapping);
 vram_mapping_free:
        io_mapping_free(qdev->vram_mapping);
-error:
        return r;
 }
 
index ab72dc3476e9cfc6d0b486376a89dcdbfa01ddd6..edc8a9916872c76444022026f2967f5b34f17b1d 100644 (file)
@@ -33,7 +33,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        struct qxl_device *qdev;
 
        bo = to_qxl_bo(tbo);
-       qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+       qdev = to_qxl(bo->tbo.base.dev);
 
        qxl_surface_evict(qdev, bo, false);
        WARN_ON_ONCE(bo->map_count > 0);
index 2feca734c7b195fe385dea55417ad9c139c6655d..4fae3e393da14994723a458e78d6d9d4510e5211 100644 (file)
@@ -243,7 +243,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
                return ret;
 
        /* allocate a surface for reserved + validated buffers */
-       ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+       ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
        if (ret)
                return ret;
        return 0;
index 62a5e424971bef7345d665cf666790a2d453c47f..f09a712b1ed2fa2489a4210a4903467b23f4335a 100644 (file)
@@ -243,7 +243,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
        if (!qxl_ttm_bo_is_qxl_bo(bo))
                return;
        qbo = to_qxl_bo(bo);
-       qdev = qbo->tbo.base.dev->dev_private;
+       qdev = to_qxl(qbo->tbo.base.dev);
 
        if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 }
 #endif
 
-int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
@@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
                        qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
        }
-       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
-#else
-       return 0;
+       qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
 #endif
 }
index 9b4072f97215edca4630cb16ed68fb6a641b96ee..3e76ae5a17eea3c79f424fb05cac20ab4d98bfe3 100644 (file)
  */
 
 #include <linux/export.h>
+#include <linux/pci.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
+#include <drm/drm_legacy.h>
 #include <drm/drm_print.h>
 
 #include "ati_pcigart.h"
index c693b2ca03298ff0031c3bc3ce8bf7aeb86b457a..11c97edde54ddade659646b82ac466e39226b37b 100644 (file)
@@ -3,42 +3,13 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-ccflags-y := -Idrivers/gpu/drm/amd/include
-
 hostprogs := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
 
-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG   $@
       cmd_mkregtable = $(obj)/mkregtable $< > $@
 
-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
-       $(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
        $(call if_changed,mkregtable)
 
 $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
index 2c27627b66593cbd0647dd2bdcab325b7f052077..f15b20da5315c848c88714c3ccc1174d2f9dd5d0 100644 (file)
@@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
        SDEBUG("<<\n");
 
 free:
-       if (ws)
-               kfree(ectx.ws);
+       kfree(ectx.ws);
        return ret;
 }
 
index a9257bed348497be3bd93de1577897985e6d570e..134aa2b01f9071d14f3658019d74aebaed0285e9 100644 (file)
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
-       { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
-       { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
 };
 
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
-       1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
-       { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
-       { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
 static const struct ci_pt_config_reg didt_config_ci[] =
 {
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
index 848ef68d90861bbc115b7a679ede8162baff6d06..5d25917251892bb09ae3879f464ef32a5fa19e49 100644 (file)
@@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
                                                                    ucOverdriveThermalController];
                        info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
                        strlcpy(info.type, name, sizeof(info.type));
-                       i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                       i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                }
        }
        num_modes = power_info->info.ucNumOfPowerModeEntries;
@@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
index c3e49c973812a1ec8fc7af297eaa6ca472b3fa70..d3c04df7e75d743eac15eb956ab1ff8febf76a39 100644 (file)
@@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                                const char *name = thermal_controller_names[thermal_controller];
                                info.addr = i2c_addr >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                        }
                }
        } else {
@@ -2721,7 +2721,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                                const char *name = "f75375";
                                info.addr = 0x28;
                                strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
                                DRM_INFO("Possible %s thermal controller at 0x%02x\n",
                                         name, info.addr);
                        }
index 59f8186a24151ee79240b4f14cc7276fda535ba5..bbb0883e8ce6a3184421c3ddcb2fca106a1e4352 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/mmu_notifier.h>
+#include <linux/pci.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc_helper.h>
@@ -44,7 +45,6 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
index 372962358a18190d321ca685dd59e4dd8efd5246..c5d1dc9618a409f487bb3156c7818bd6e91aa061 100644 (file)
@@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
        unsigned long irqflags;
        int r;
 
-       if (pipe < 0 || pipe >= rdev->num_crtc) {
+       if (pipe >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }
@@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
        struct radeon_device *rdev = dev->dev_private;
        unsigned long irqflags;
 
-       if (pipe < 0 || pipe >= rdev->num_crtc) {
+       if (pipe >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return;
        }
index 2cb85dbe728f0ecbd764d9115e90cbcf7ae802ec..a167e1c36d2432719e1f3032d8e97577a074bc66 100644 (file)
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
        false
 };
 
-static const struct si_dte_data dte_data_tahiti_le =
-{
-       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
-       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
-       0x5,
-       0xAFC8,
-       0x64,
-       0x32,
-       1,
-       0,
-       0x10,
-       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
-       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
-       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
-       85,
-       true
-};
-
 static const struct si_dte_data dte_data_tahiti_pro =
 {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
index 654e2dd081460d3382592131602c98ecc893586b..3e67cf70f040215e1ed0245777091efeeaa4ee2a 100644 (file)
@@ -530,7 +530,6 @@ static int rcar_du_remove(struct platform_device *pdev)
        drm_dev_unregister(ddev);
 
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
 
        drm_dev_put(ddev);
 
index c07c6a88aff0224ffe9a34669f13c390b622f544..b0335da0c1614609846bbc78594e00970518e82a 100644 (file)
@@ -13,6 +13,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
  * Encoder
  */
 
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
 {
        struct device_node *ports;
@@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                }
        }
 
-       ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(rcdu->ddev, encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret < 0)
                goto done;
 
-       drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
        /*
         * Attach the bridge to the encoder. The bridge will create the
         * connector.
index fcfd916227d131a9b41ca157a91994ea3b6374ef..482329102f1925cd184bd5fbf29109c870c342a7 100644 (file)
@@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
        unsigned int i;
        int ret;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
 
        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
index c6430027169fc8a0daa6973876bfe7c52dab6376..a0021fc25b27c12054b5fd29bfbcc9e9a8f9b164 100644 (file)
@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 
                drm_plane_create_alpha_property(&plane->plane);
 
-               if (type == DRM_PLANE_TYPE_PRIMARY)
-                       continue;
-
-               drm_object_attach_property(&plane->plane.base,
-                                          rcdu->props.colorkey,
-                                          RCAR_DU_COLORKEY_NONE);
-               drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+               if (type == DRM_PLANE_TYPE_PRIMARY) {
+                       drm_plane_create_zpos_immutable_property(&plane->plane,
+                                                                0);
+               } else {
+                       drm_object_attach_property(&plane->plane.base,
+                                                  rcdu->props.colorkey,
+                                                  RCAR_DU_COLORKEY_NONE);
+                       drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+               }
        }
 
        return 0;
index 5e4faf258c31a6b9042a4c47958449469348da3b..f1a81c9b184d4c427b2de1df4ac588e966c92142 100644 (file)
@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
                drm_plane_helper_add(&plane->plane,
                                     &rcar_du_vsp_plane_helper_funcs);
 
-               if (type == DRM_PLANE_TYPE_PRIMARY)
-                       continue;
-
-               drm_plane_create_alpha_property(&plane->plane);
-               drm_plane_create_zpos_property(&plane->plane, 1, 1,
-                                              vsp->num_planes - 1);
+               if (type == DRM_PLANE_TYPE_PRIMARY) {
+                       drm_plane_create_zpos_immutable_property(&plane->plane,
+                                                                0);
+               } else {
+                       drm_plane_create_alpha_property(&plane->plane);
+                       drm_plane_create_zpos_property(&plane->plane, 1, 1,
+                                                      vsp->num_planes - 1);
+               }
        }
 
        return 0;
index ce98c08aa8b446da487481ebf51a4f996a4efd77..ade2327a10e2c3d340bf91846f25f4748c34c70e 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
        .atomic_check = rockchip_dp_drm_encoder_atomic_check,
 };
 
-static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
 {
        struct device *dev = dp->dev;
@@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
                                                             dev->of_node);
        DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                DRM_ERROR("failed to initialize encoder with drm\n");
                return ret;
index eed594bd38d35f3cb3d9fa93740cf032d16147d4..c634b95b50f7518040abe6ecf888812dc35c72af 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "cdn-dp-core.h"
 #include "cdn-dp-reg.h"
@@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
        .atomic_check = cdn_dp_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
 {
        struct device *dev = dp->dev;
@@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
                                                             dev->of_node);
        DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
 
-       ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                DRM_ERROR("failed to initialize encoder with drm\n");
                return ret;
@@ -1109,7 +1106,7 @@ static const struct component_ops cdn_dp_component_ops = {
        .unbind = cdn_dp_unbind,
 };
 
-int cdn_dp_suspend(struct device *dev)
+static int cdn_dp_suspend(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
        int ret = 0;
@@ -1123,7 +1120,7 @@ int cdn_dp_suspend(struct device *dev)
        return ret;
 }
 
-int cdn_dp_resume(struct device *dev)
+static int cdn_dp_resume(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
index 7361c07cb4a78bd72b72e49bb8a27cbb452cdbf6..9d2163ef4d6e21e875356c346b9bd5ad3a2fb6d7 100644 (file)
@@ -601,7 +601,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
        case YCBCR_4_2_0:
                val[0] = 5;
                break;
-       };
+       }
 
        switch (video->color_depth) {
        case 6:
@@ -619,7 +619,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
        case 16:
                val[1] = 4;
                break;
-       };
+       }
 
        msa_misc = 2 * val[0] + 32 * val[1] +
                   ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
@@ -700,7 +700,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
        case 16:
                val = BCS_16;
                break;
-       };
+       }
 
        val += video->color_fmt << 8;
        ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
index 6e1270e45f974d444df8a7b9d2af176523d206c2..3feff0c45b3f745f5c4aa6d52dcf9d87a6c6ed03 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/bridge/dw_mipi_dsi.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = {
        .disable = dw_mipi_dsi_encoder_disable,
 };
 
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
                                           struct drm_device *drm_dev)
 {
@@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
                                                             dsi->dev->of_node);
 
-       ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
-                              DRM_MODE_ENCODER_DSI, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
        if (ret) {
                DRM_ERROR("Failed to initialize encoder with drm\n");
                return ret;
index 7f56d8c3491daccda86c3156b60dde8d4a43ee34..121aa8a63a7611c04441cf06a5f338a9a820ae99 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
        return (valid) ? MODE_OK : MODE_BAD;
 }
 
-static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
 {
 }
@@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
        }
 
        drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        platform_set_drvdata(pdev, hdmi);
 
index e5864e8230205f9a014d80bc8d235a5ca89f4794..7afdc54eb3ec1246482f44045c777c04a6fb5385 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
        .atomic_check = inno_hdmi_encoder_atomic_check,
 };
 
-static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_connector_status
 inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
                return -EPROBE_DEFER;
 
        drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
index fe203d38664eb5f2dea49d5dc1fc3078a321cf61..1c546c3a8998473a731da73871acf71be13e6ad4 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <linux/clk.h>
 #include <linux/mfd/syscon.h>
@@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
        .atomic_check = rk3066_hdmi_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_connector_status
 rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
                return -EPROBE_DEFER;
 
        drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
index 20ecb1508a2247992143fda3a8be830da110de17..0f3eb392fe39d5cc1f41d7c8a1dd074af75550ce 100644 (file)
@@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev)
        if (ret)
                goto err_free;
 
-       drm_mode_config_init(drm_dev);
+       ret = drmm_mode_config_init(drm_dev);
+       if (ret)
+               goto err_iommu_cleanup;
 
        rockchip_drm_mode_config_init(drm_dev);
 
        /* Try to bind all sub drivers. */
        ret = component_bind_all(dev, drm_dev);
        if (ret)
-               goto err_mode_config_cleanup;
+               goto err_iommu_cleanup;
 
        ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
        if (ret)
@@ -173,12 +175,9 @@ err_kms_helper_poll_fini:
        rockchip_drm_fbdev_fini(drm_dev);
 err_unbind_all:
        component_unbind_all(dev, drm_dev);
-err_mode_config_cleanup:
-       drm_mode_config_cleanup(drm_dev);
+err_iommu_cleanup:
        rockchip_iommu_cleanup(drm_dev);
 err_free:
-       drm_dev->dev_private = NULL;
-       dev_set_drvdata(dev, NULL);
        drm_dev_put(drm_dev);
        return ret;
 }
@@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev)
 
        drm_atomic_helper_shutdown(drm_dev);
        component_unbind_all(dev, drm_dev);
-       drm_mode_config_cleanup(drm_dev);
        rockchip_iommu_cleanup(drm_dev);
 
-       drm_dev->dev_private = NULL;
-       dev_set_drvdata(dev, NULL);
        drm_dev_put(drm_dev);
 }
 
index c5b06048124e3a8004c7b5ebd654e4f9af2a94e1..e33c2dcd0d4b6fe9a9a071fda0d0de80103407bb 100644 (file)
@@ -30,6 +30,7 @@ struct rockchip_crtc_state {
        int output_mode;
        int output_bpc;
        int output_flags;
+       bool enable_afbc;
 };
 #define to_rockchip_crtc_state(s) \
                container_of(s, struct rockchip_crtc_state, base)
index 221e72e71432105953b3704b76bef77aa6e8ed6e..9b13c784b3475280483cd621f79f2c72d7349479 100644 (file)
@@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
        .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
+static struct drm_framebuffer *
+rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+                  const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_afbc_framebuffer *afbc_fb;
+       const struct drm_format_info *info;
+       int ret;
+
+       info = drm_get_format_info(dev, mode_cmd);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+       if (!afbc_fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+                                        &rockchip_drm_fb_funcs);
+       if (ret) {
+               kfree(afbc_fb);
+               return ERR_PTR(ret);
+       }
+
+       if (drm_is_afbc(mode_cmd->modifier[0])) {
+               int ret, i;
+
+               ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+               if (ret) {
+                       struct drm_gem_object **obj = afbc_fb->base.obj;
+
+                       for (i = 0; i < info->num_planes; ++i)
+                               drm_gem_object_put_unlocked(obj[i]);
+
+                       kfree(afbc_fb);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return &afbc_fb->base;
+}
+
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
-       .fb_create = drm_gem_fb_create_with_dirty,
+       .fb_create = rockchip_fb_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
index cecb2cc781f5493dd7fe4756bd4e0a635d1a3ffa..33463b79a37b3d1318d7cb789992b81833c246ce 100644 (file)
 #define VOP_WIN_TO_INDEX(vop_win) \
        ((vop_win) - (vop_win)->vop->win)
 
+#define VOP_AFBC_SET(vop, name, v) \
+       do { \
+               if ((vop)->data->afbc) \
+                       vop_reg_set((vop), &(vop)->data->afbc->name, \
+                                   0, ~0, v, #name); \
+       } while (0)
+
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
 
+#define AFBC_FMT_RGB565                0x0
+#define AFBC_FMT_U8U8U8U8      0x5
+#define AFBC_FMT_U8U8U8                0x4
+
+#define AFBC_TILE_16x16                BIT(4)
+
 /*
  * The coefficients of the following matrix are all fixed points.
  * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
@@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format)
        }
 }
 
+static int vop_convert_afbc_format(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               return AFBC_FMT_U8U8U8U8;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               return AFBC_FMT_U8U8U8;
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               return AFBC_FMT_RGB565;
+       /* either of the below should not be reachable */
+       default:
+               DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
                                  uint32_t dst, bool is_horizontal,
                                  int vsu_mode, int *vskiplines)
@@ -598,6 +634,17 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
                        vop_win_disable(vop, vop_win);
                }
        }
+
+       if (vop->data->afbc) {
+               struct rockchip_crtc_state *s;
+               /*
+                * Disable AFBC and forget there was a vop window with AFBC
+                */
+               VOP_AFBC_SET(vop, enable, 0);
+               s = to_rockchip_crtc_state(crtc->state);
+               s->enable_afbc = false;
+       }
+
        spin_unlock(&vop->reg_lock);
 
        vop_cfg_done(vop);
@@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane)
        drm_plane_cleanup(plane);
 }
 
+static inline bool rockchip_afbc(u64 modifier)
+{
+       return modifier == ROCKCHIP_AFBC_MOD;
+}
+
+static bool rockchip_mod_supported(struct drm_plane *plane,
+                                  u32 format, u64 modifier)
+{
+       if (modifier == DRM_FORMAT_MOD_LINEAR)
+               return true;
+
+       if (!rockchip_afbc(modifier)) {
+               DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
+
+               return false;
+       }
+
+       return vop_convert_afbc_format(format) >= 0;
+}
+
 static int vop_plane_atomic_check(struct drm_plane *plane,
                           struct drm_plane_state *state)
 {
@@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (rockchip_afbc(fb->modifier)) {
+               struct vop *vop = to_vop(crtc);
+
+               if (!vop->data->afbc) {
+                       DRM_ERROR("vop does not support AFBC\n");
+                       return -EINVAL;
+               }
+
+               ret = vop_convert_afbc_format(fb->format->format);
+               if (ret < 0)
+                       return ret;
+
+               if (state->src.x1 || state->src.y1) {
+                       DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+                       return -EINVAL;
+               }
+
+               if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+                       DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+                                 state->rotation);
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
@@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
+       if (rockchip_afbc(fb->modifier)) {
+               int afbc_format = vop_convert_afbc_format(fb->format->format);
+
+               VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
+               VOP_AFBC_SET(vop, hreg_block_split, 0);
+               VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
+               VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
+               VOP_AFBC_SET(vop, pic_size, act_info);
+       }
+
        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
@@ -1001,6 +1102,7 @@ static const struct drm_plane_funcs vop_plane_funcs = {
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .format_mod_supported = rockchip_mod_supported,
 };
 
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -1310,6 +1412,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *crtc_state)
 {
        struct vop *vop = to_vop(crtc);
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct rockchip_crtc_state *s;
+       int afbc_planes = 0;
 
        if (vop->lut_regs && crtc_state->color_mgmt_changed &&
            crtc_state->gamma_lut) {
@@ -1323,6 +1429,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
                }
        }
 
+       drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+               plane_state =
+                       drm_atomic_get_plane_state(crtc_state->state, plane);
+               if (IS_ERR(plane_state)) {
+                       DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
+                                     plane->name);
+                       return PTR_ERR(plane_state);
+               }
+
+               if (drm_is_afbc(plane_state->fb->modifier))
+                       ++afbc_planes;
+       }
+
+       if (afbc_planes > 1) {
+               DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
+               return -EINVAL;
+       }
+
+       s = to_rockchip_crtc_state(crtc_state);
+       s->enable_afbc = afbc_planes > 0;
+
        return 0;
 }
 
@@ -1333,6 +1460,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vop *vop = to_vop(crtc);
        struct drm_plane *plane;
+       struct rockchip_crtc_state *s;
        int i;
 
        if (WARN_ON(!vop->is_enabled))
@@ -1340,6 +1468,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
 
        spin_lock(&vop->reg_lock);
 
+       /* Enable AFBC if there is some AFBC window, disable otherwise. */
+       s = to_rockchip_crtc_state(crtc->state);
+       VOP_AFBC_SET(vop, enable, s->enable_afbc);
        vop_cfg_done(vop);
 
        spin_unlock(&vop->reg_lock);
@@ -1634,7 +1765,8 @@ static int vop_create_crtc(struct vop *vop)
                                               0, &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
-                                              NULL, win_data->type, NULL);
+                                              win_data->phy->format_modifiers,
+                                              win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
                                      ret);
@@ -1678,7 +1810,8 @@ static int vop_create_crtc(struct vop *vop)
                                               &vop_plane_funcs,
                                               win_data->phy->data_formats,
                                               win_data->phy->nformats,
-                                              NULL, win_data->type, NULL);
+                                              win_data->phy->format_modifiers,
+                                              win_data->type, NULL);
                if (ret) {
                        DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
                                      ret);
index cc672620d6e04c9f690c28d4836c7e162c1c7123..d03bdb531ef2c42a95d59cd30dc517d28e2a4f69 100644 (file)
 
 #define NUM_YUV2YUV_COEFFICIENTS 12
 
+#define ROCKCHIP_AFBC_MOD \
+       DRM_FORMAT_MOD_ARM_AFBC( \
+               AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+       )
+
 enum vop_data_format {
        VOP_FMT_ARGB8888 = 0,
        VOP_FMT_RGB888,
@@ -34,6 +39,16 @@ struct vop_reg {
        bool relaxed;
 };
 
+struct vop_afbc {
+       struct vop_reg enable;
+       struct vop_reg win_sel;
+       struct vop_reg format;
+       struct vop_reg hreg_block_split;
+       struct vop_reg pic_size;
+       struct vop_reg hdr_ptr;
+       struct vop_reg rstn;
+};
+
 struct vop_modeset {
        struct vop_reg htotal_pw;
        struct vop_reg hact_st_end;
@@ -134,6 +149,7 @@ struct vop_win_phy {
        const struct vop_scl_regs *scl;
        const uint32_t *data_formats;
        uint32_t nformats;
+       const uint64_t *format_modifiers;
 
        struct vop_reg enable;
        struct vop_reg gate;
@@ -173,6 +189,7 @@ struct vop_data {
        const struct vop_misc *misc;
        const struct vop_modeset *modeset;
        const struct vop_output *output;
+       const struct vop_afbc *afbc;
        const struct vop_win_yuv2yuv_data *win_yuv2yuv;
        const struct vop_win_data *win;
        unsigned int win_size;
index 449a62908d213215e6f83782cd88e91fd54a1a1c..63f967902c2d847922199cdae79cf517dd4d74da 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
        .atomic_check = rockchip_lvds_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int rk3288_lvds_probe(struct platform_device *pdev,
                             struct rockchip_lvds *lvds)
 {
@@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
        encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
                                                             dev->of_node);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
        if (ret < 0) {
                DRM_DEV_ERROR(drm_dev->dev,
                              "failed to initialize encoder: %d\n", ret);
index 90784781e51596499677e95821c4c1a5e181b92d..9a771af5d0c9546c89f25238e85f28f03a3d13ba 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
@@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
        .atomic_check = rockchip_rgb_encoder_atomic_check,
 };
 
-static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
                                       struct drm_crtc *crtc,
                                       struct drm_device *drm_dev)
@@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
        encoder = &rgb->encoder;
        encoder->possible_crtcs = drm_crtc_mask(crtc);
 
-       ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
        if (ret < 0) {
                DRM_DEV_ERROR(drm_dev->dev,
                              "failed to initialize encoder: %d\n", ret);
index 7a9d979c8d5d42e2c94b61a3ceed1e3f3ec5248f..2413deded22c5f58c0c90aab524939b86e2da2c4 100644 (file)
@@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = {
        DRM_FORMAT_NV24,
 };
 
+static const uint64_t format_modifiers_win_full[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_win_full_afbc[] = {
+       ROCKCHIP_AFBC_MOD,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
 static const uint32_t formats_win_lite[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
@@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = {
        DRM_FORMAT_BGR565,
 };
 
+static const uint64_t format_modifiers_win_lite[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
 static const struct vop_scl_regs rk3036_win_scl = {
        .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
        .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = {
        .scl = &rk3036_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
@@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = {
 static const struct vop_win_phy rk3036_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = {
 static const struct vop_win_phy rk3126_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = {
        .scl = &px30_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
@@ -249,6 +269,7 @@ static const struct vop_win_phy px30_win0_data = {
 static const struct vop_win_phy px30_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
        .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
@@ -261,6 +282,7 @@ static const struct vop_win_phy px30_win1_data = {
 static const struct vop_win_phy px30_win2_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
        .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
        .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
@@ -316,6 +338,7 @@ static const struct vop_win_phy rk3066_win0_data = {
        .scl = &rk3066_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
@@ -332,6 +355,7 @@ static const struct vop_win_phy rk3066_win1_data = {
        .scl = &rk3066_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
@@ -347,6 +371,7 @@ static const struct vop_win_phy rk3066_win1_data = {
 static const struct vop_win_phy rk3066_win2_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
        .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
        .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
@@ -426,6 +451,7 @@ static const struct vop_win_phy rk3188_win0_data = {
        .scl = &rk3188_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
        .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
        .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
@@ -440,6 +466,7 @@ static const struct vop_win_phy rk3188_win0_data = {
 static const struct vop_win_phy rk3188_win1_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
        .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
        .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
@@ -545,6 +572,7 @@ static const struct vop_win_phy rk3288_win01_data = {
        .scl = &rk3288_win_full_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
@@ -563,6 +591,7 @@ static const struct vop_win_phy rk3288_win01_data = {
 static const struct vop_win_phy rk3288_win23_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
        .gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
@@ -677,6 +706,7 @@ static const struct vop_win_phy rk3368_win01_data = {
        .scl = &rk3288_win_full_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full,
        .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
@@ -697,6 +727,7 @@ static const struct vop_win_phy rk3368_win01_data = {
 static const struct vop_win_phy rk3368_win23_data = {
        .data_formats = formats_win_lite,
        .nformats = ARRAY_SIZE(formats_win_lite),
+       .format_modifiers = format_modifiers_win_lite,
        .gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
        .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
        .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
@@ -817,6 +848,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
          .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
        { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
        { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+
+};
+
+static const struct vop_win_phy rk3399_win01_data = {
+       .scl = &rk3288_win_full_scl,
+       .data_formats = formats_win_full,
+       .nformats = ARRAY_SIZE(formats_win_full),
+       .format_modifiers = format_modifiers_win_full_afbc,
+       .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
+       .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
+       .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+       .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
+       .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
+       .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
+       .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
+       .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
+       .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
+       .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
+       .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
+       .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+       .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+/*
+ * rk3399 vop big windows register layout is same as rk3288, but we
+ * have a separate rk3399 win data array here so that we can advertise
+ * AFBC on the primary plane.
+ */
+static const struct vop_win_data rk3399_vop_win_data[] = {
+       { .base = 0x00, .phy = &rk3399_win01_data,
+         .type = DRM_PLANE_TYPE_PRIMARY },
+       { .base = 0x40, .phy = &rk3288_win01_data,
+         .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x00, .phy = &rk3288_win23_data,
+         .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x50, .phy = &rk3288_win23_data,
+         .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_afbc rk3399_vop_afbc = {
+       .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
+       .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
+       .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
+       .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
+       .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
+       .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
+       .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
 };
 
 static const struct vop_data rk3399_vop_big = {
@@ -826,9 +904,10 @@ static const struct vop_data rk3399_vop_big = {
        .common = &rk3288_common,
        .modeset = &rk3288_modeset,
        .output = &rk3399_output,
+       .afbc = &rk3399_vop_afbc,
        .misc = &rk3368_misc,
-       .win = rk3368_vop_win_data,
-       .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+       .win = rk3399_vop_win_data,
+       .win_size = ARRAY_SIZE(rk3399_vop_win_data),
        .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
 };
 
index 75a752d59ef1768e890a1b8306a8e45f0a44a254..03556dbfcafbf2a68ce6d511d029e5c06493d70a 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "shmob_drm_backlight.h"
@@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
        .mode_set = shmob_drm_encoder_mode_set,
 };
 
-static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = shmob_drm_encoder_destroy,
-};
-
 int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
 {
        struct drm_encoder *encoder = &sdev->encoder.encoder;
@@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
 
        encoder->possible_crtcs = 1;
 
-       ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(sdev->ddev, encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                return ret;
 
index b8c0930959c7e84bb0ec9b7d1a96a10bcd338c65..ae9d6b8d3ca8764f2b6c63ffb0d61c51e1b3ced5 100644 (file)
@@ -192,7 +192,6 @@ static int shmob_drm_remove(struct platform_device *pdev)
 
        drm_dev_unregister(ddev);
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
        drm_irq_uninstall(ddev);
        drm_dev_put(ddev);
 
@@ -288,7 +287,6 @@ err_irq_uninstall:
        drm_irq_uninstall(ddev);
 err_modeset_cleanup:
        drm_kms_helper_poll_fini(ddev);
-       drm_mode_config_cleanup(ddev);
 err_free_drm_dev:
        drm_dev_put(ddev);
 
index c51197b6fd8547420c1ba14c151dfc82e5510296..7a866d6ce6bb56461fc0efe8838c59a79f0dd453 100644 (file)
@@ -126,7 +126,11 @@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
 
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
 {
-       drm_mode_config_init(sdev->ddev);
+       int ret;
+
+       ret = drmm_mode_config_init(sdev->ddev);
+       if (ret)
+               return ret;
 
        shmob_drm_crtc_create(sdev);
        shmob_drm_encoder_create(sdev);
index c7652584255d3762387a856c0e807b8f3b80b5fb..319962a2c17bf6090c62550301ab420c8d5f9264 100644 (file)
@@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = {
        },
 };
 
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
-                               struct drm_minor *minor)
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+                                struct drm_minor *minor)
 {
        unsigned int i;
 
@@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo,
        for (i = 0; i < STI_MAX_MIXER; i++)
                if (compo->mixer[i])
                        sti_mixer_debugfs_init(compo->mixer[i], minor);
-
-       return 0;
 }
 
 static int sti_compositor_bind(struct device *dev,
index ac4bb38348102e597613d5f80e02cd4e16e9b825..25bb01bdd013f897faa4ffd151d3522fc362587b 100644 (file)
@@ -79,7 +79,7 @@ struct sti_compositor {
        struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
 };
 
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
-                               struct drm_minor *minor);
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+                                struct drm_minor *minor);
 
 #endif
index 49e6cb8f58367ee198d4a2e9146be0ac4b31b259..6f37c104c46f909f057b10df0a47dc561b868636 100644 (file)
@@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
        struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
 
        if (drm_crtc_index(crtc) == 0)
-               return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+               sti_compositor_debugfs_init(compo, crtc->dev->primary);
 
        return 0;
 }
index ea64c1dcaf634042a41e3bef1162eda03630c521..a9805743102375bb55696c33dc2fa8f317b618f6 100644 (file)
@@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = {
        { "cursor", cursor_dbg_show, 0, NULL },
 };
 
-static int cursor_debugfs_init(struct sti_cursor *cursor,
-                              struct drm_minor *minor)
+static void cursor_debugfs_init(struct sti_cursor *cursor,
+                               struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
                cursor_debugfs_files[i].data = cursor;
 
-       return drm_debugfs_create_files(cursor_debugfs_files,
-                                       ARRAY_SIZE(cursor_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(cursor_debugfs_files,
+                                ARRAY_SIZE(cursor_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
@@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_cursor *cursor = to_sti_cursor(plane);
 
-       return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+       cursor_debugfs_init(cursor, drm_plane->dev->primary);
+
+       return 0;
 }
 
 static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
index 50870d8cbb76739bcee4d9b5ea867fc7b0f2401a..3f9db3e3f39780799197cb0a2abc166dbaf85860 100644 (file)
@@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = {
        {"fps_get", sti_drm_fps_dbg_show, 0},
 };
 
-static int sti_drm_dbg_init(struct drm_minor *minor)
+static void sti_drm_dbg_init(struct drm_minor *minor)
 {
-       int ret;
-
-       ret = drm_debugfs_create_files(sti_drm_dbg_list,
-                                      ARRAY_SIZE(sti_drm_dbg_list),
-                                      minor->debugfs_root, minor);
-       if (ret)
-               goto err;
+       drm_debugfs_create_files(sti_drm_dbg_list,
+                                ARRAY_SIZE(sti_drm_dbg_list),
+                                minor->debugfs_root, minor);
 
        debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
                            minor->dev, &sti_drm_fps_fops);
 
        DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
-       return 0;
-err:
-       DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
-       return ret;
 }
 
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
index 3d04bfca21a06232d6fe5ff54a35a0f4d4c4fab3..de4af7735c469c8620acd803866c2d4cc7451fe2 100644 (file)
@@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = {
        { "dvo", dvo_dbg_show, 0, NULL },
 };
 
-static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
                dvo_debugfs_files[i].data = dvo;
 
-       return drm_debugfs_create_files(dvo_debugfs_files,
-                                       ARRAY_SIZE(dvo_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(dvo_debugfs_files,
+                                ARRAY_SIZE(dvo_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_dvo_disable(struct drm_bridge *bridge)
@@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector)
                = to_sti_dvo_connector(connector);
        struct sti_dvo *dvo = dvo_connector->dvo;
 
-       if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
-               DRM_ERROR("DVO debugfs setup failed\n");
-               return -EINVAL;
-       }
+       dvo_debugfs_init(dvo, dvo->drm_dev->primary);
 
        return 0;
 }
index 11595c748844f6f0a088f698b4c4cf0011142bf7..2d5a2b5b78b8e5dd5f202dbb009b09c1615e8ef4 100644 (file)
@@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
        for (i = 0; i < nb_files; i++)
                gdp_debugfs_files[i].data = gdp;
 
-       return drm_debugfs_create_files(gdp_debugfs_files,
-                                       nb_files,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(gdp_debugfs_files,
+                                nb_files,
+                                minor->debugfs_root, minor);
+       return 0;
 }
 
 static int sti_gdp_fourcc2format(int fourcc)
index f3f28d79b0e40ea3a22685351f67022c1e060e36..a1ec891eaf3aaad720b6ddd797b14892753224a4 100644 (file)
@@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = {
        { "hda", hda_dbg_show, 0, NULL },
 };
 
-static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
                hda_debugfs_files[i].data = hda;
 
-       return drm_debugfs_create_files(hda_debugfs_files,
-                                       ARRAY_SIZE(hda_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hda_debugfs_files,
+                                ARRAY_SIZE(hda_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 /**
@@ -643,10 +643,7 @@ static int sti_hda_late_register(struct drm_connector *connector)
                = to_sti_hda_connector(connector);
        struct sti_hda *hda = hda_connector->hda;
 
-       if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
-               DRM_ERROR("HDA debugfs setup failed\n");
-               return -EINVAL;
-       }
+       hda_debugfs_init(hda, hda->drm_dev->primary);
 
        return 0;
 }
index 18eaf786ffa46640060e447adc80e8fb90131c5e..5b15c4974e6b5235fd1e32bc0cc7b46ee492e891 100644 (file)
@@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = {
        { "hdmi", hdmi_dbg_show, 0, NULL },
 };
 
-static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
                hdmi_debugfs_files[i].data = hdmi;
 
-       return drm_debugfs_create_files(hdmi_debugfs_files,
-                                       ARRAY_SIZE(hdmi_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hdmi_debugfs_files,
+                                ARRAY_SIZE(hdmi_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_hdmi_disable(struct drm_bridge *bridge)
@@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
                = to_sti_hdmi_connector(connector);
        struct sti_hdmi *hdmi = hdmi_connector->hdmi;
 
-       if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
-               DRM_ERROR("HDMI debugfs setup failed\n");
-               return -EINVAL;
-       }
+       hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
 
        return 0;
 }
index 1015abe0ce086f8bf489ba76d7e203d402890e49..5a4e12194a77dbef4c2958d2e55fd261546ed2c1 100644 (file)
@@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = {
        { "hqvdp", hqvdp_dbg_show, 0, NULL },
 };
 
-static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
                hqvdp_debugfs_files[i].data = hqvdp;
 
-       return drm_debugfs_create_files(hqvdp_debugfs_files,
-                                       ARRAY_SIZE(hqvdp_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(hqvdp_debugfs_files,
+                                ARRAY_SIZE(hqvdp_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 /**
@@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
 
-       return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+       hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+
+       return 0;
 }
 
 static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
index c3a3e1e5fc8ab8317df27fbb5bcf4ea110a55098..7e5f14646625b437a28b94c4a173ed7dc9ee9d23 100644 (file)
@@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
        { "mixer_aux", mixer_dbg_show, 0, NULL },
 };
 
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
 {
        unsigned int i;
        struct drm_info_list *mixer_debugfs_files;
@@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
                nb_files = ARRAY_SIZE(mixer1_debugfs_files);
                break;
        default:
-               return -EINVAL;
+               return;
        }
 
        for (i = 0; i < nb_files; i++)
                mixer_debugfs_files[i].data = mixer;
 
-       return drm_debugfs_create_files(mixer_debugfs_files,
-                                       nb_files,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(mixer_debugfs_files,
+                                nb_files,
+                                minor->debugfs_root, minor);
 }
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
index d9544246913a107da24f5abff1c0fb9e3a1409e4..ab06beb7b25843b6df6ea333ad3220bb5d5c0213 100644 (file)
@@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
 
 /* depth in Cross-bar control = z order */
 #define GAM_MIXER_NB_DEPTH_LEVEL 6
index c36a8da373cb5d81b0269d31a1295f07b2b83bbb..df3817f0fd30231d3a4d3789e36e297775b7a025 100644 (file)
@@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = {
        { "tvout", tvout_dbg_show, 0, NULL },
 };
 
-static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
                tvout_debugfs_files[i].data = tvout;
 
-       return drm_debugfs_create_files(tvout_debugfs_files,
-                                       ARRAY_SIZE(tvout_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tvout_debugfs_files,
+                                ARRAY_SIZE(tvout_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
 static int sti_tvout_late_register(struct drm_encoder *encoder)
 {
        struct sti_tvout *tvout = to_sti_tvout(encoder);
-       int ret;
 
        if (tvout->debugfs_registered)
                return 0;
 
-       ret = tvout_debugfs_init(tvout, encoder->dev->primary);
-       if (ret)
-               return ret;
+       tvout_debugfs_init(tvout, encoder->dev->primary);
 
        tvout->debugfs_registered = true;
        return 0;
index 2d4230410464471fe615f29b4155976c3222d1f7..2d818397918db832ccacd76dab86fdd54563a737 100644 (file)
@@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = {
        { "vid", vid_dbg_show, 0, NULL },
 };
 
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
                vid_debugfs_files[i].data = vid;
 
-       return drm_debugfs_create_files(vid_debugfs_files,
-                                       ARRAY_SIZE(vid_debugfs_files),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(vid_debugfs_files,
+                                ARRAY_SIZE(vid_debugfs_files),
+                                minor->debugfs_root, minor);
 }
 
 void sti_vid_commit(struct sti_vid *vid,
index 9dbd78461de12b419e4e4d7ae00b54c8be4d12d6..991849ba50b58845e181c071461543d8a17a1800 100644 (file)
@@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
 struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
                               int id, void __iomem *baseaddr);
 
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
 
 #endif
index ea9fcbdc68b397787c84edb609a21a007be2e093..0f85dd86cafa73f24f1ec33e3ed8fb36a764eabc 100644 (file)
@@ -88,7 +88,9 @@ static int drv_load(struct drm_device *ddev)
 
        ddev->dev_private = (void *)ldev;
 
-       drm_mode_config_init(ddev);
+       ret = drmm_mode_config_init(ddev);
+       if (ret)
+               return ret;
 
        /*
         * set max width and height as default value.
@@ -103,7 +105,7 @@ static int drv_load(struct drm_device *ddev)
 
        ret = ltdc_load(ddev);
        if (ret)
-               goto err;
+               return ret;
 
        drm_mode_config_reset(ddev);
        drm_kms_helper_poll_init(ddev);
@@ -111,9 +113,6 @@ static int drv_load(struct drm_device *ddev)
        platform_set_drvdata(pdev, ddev);
 
        return 0;
-err:
-       drm_mode_config_cleanup(ddev);
-       return ret;
 }
 
 static void drv_unload(struct drm_device *ddev)
@@ -122,7 +121,6 @@ static void drv_unload(struct drm_device *ddev)
 
        drm_kms_helper_poll_fini(ddev);
        ltdc_unload(ddev);
-       drm_mode_config_cleanup(ddev);
 }
 
 static __maybe_unused int drv_suspend(struct device *dev)
index df585fe64f614913f593db359661b1cbccf8dc83..f894968d6e452a7ca10d97f78aea0da00519abaa 100644 (file)
@@ -42,8 +42,6 @@
 
 #define MAX_IRQ 4
 
-#define MAX_ENDPOINTS 2
-
 #define HWVER_10200 0x010200
 #define HWVER_10300 0x010300
 #define HWVER_20101 0x020101
@@ -1201,36 +1199,20 @@ int ltdc_load(struct drm_device *ddev)
        struct ltdc_device *ldev = ddev->dev_private;
        struct device *dev = ddev->dev;
        struct device_node *np = dev->of_node;
-       struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
-       struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
+       struct drm_bridge *bridge;
+       struct drm_panel *panel;
        struct drm_crtc *crtc;
        struct reset_control *rstc;
        struct resource *res;
-       int irq, ret, i, endpoint_not_ready = -ENODEV;
+       int irq, i, nb_endpoints;
+       int ret = -ENODEV;
 
        DRM_DEBUG_DRIVER("\n");
 
-       /* Get endpoints if any */
-       for (i = 0; i < MAX_ENDPOINTS; i++) {
-               ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
-                                                 &bridge[i]);
-
-               /*
-                * If at least one endpoint is -EPROBE_DEFER, defer probing,
-                * else if at least one endpoint is ready, continue probing.
-                */
-               if (ret == -EPROBE_DEFER)
-                       return ret;
-               else if (!ret)
-                       endpoint_not_ready = 0;
-       }
-
-       if (endpoint_not_ready)
-               return endpoint_not_ready;
-
-       rstc = devm_reset_control_get_exclusive(dev, NULL);
-
-       mutex_init(&ldev->err_lock);
+       /* Get number of endpoints */
+       nb_endpoints = of_graph_get_endpoint_count(np);
+       if (!nb_endpoints)
+               return -ENODEV;
 
        ldev->pixel_clk = devm_clk_get(dev, "lcd");
        if (IS_ERR(ldev->pixel_clk)) {
@@ -1244,6 +1226,43 @@ int ltdc_load(struct drm_device *ddev)
                return -ENODEV;
        }
 
+       /* Get endpoints if any */
+       for (i = 0; i < nb_endpoints; i++) {
+               ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
+
+               /*
+                * If at least one endpoint is -ENODEV, continue probing,
+                * else if at least one endpoint returned an error
+                * (ie -EPROBE_DEFER) then stop probing.
+                */
+               if (ret == -ENODEV)
+                       continue;
+               else if (ret)
+                       goto err;
+
+               if (panel) {
+                       bridge = drm_panel_bridge_add_typed(panel,
+                                                           DRM_MODE_CONNECTOR_DPI);
+                       if (IS_ERR(bridge)) {
+                               DRM_ERROR("panel-bridge endpoint %d\n", i);
+                               ret = PTR_ERR(bridge);
+                               goto err;
+                       }
+               }
+
+               if (bridge) {
+                       ret = ltdc_encoder_init(ddev, bridge);
+                       if (ret) {
+                               DRM_ERROR("init encoder endpoint %d\n", i);
+                               goto err;
+                       }
+               }
+       }
+
+       rstc = devm_reset_control_get_exclusive(dev, NULL);
+
+       mutex_init(&ldev->err_lock);
+
        if (!IS_ERR(rstc)) {
                reset_control_assert(rstc);
                usleep_range(10, 20);
@@ -1285,27 +1304,7 @@ int ltdc_load(struct drm_device *ddev)
                        DRM_ERROR("Failed to register LTDC interrupt\n");
                        goto err;
                }
-       }
 
-       /* Add endpoints panels or bridges if any */
-       for (i = 0; i < MAX_ENDPOINTS; i++) {
-               if (panel[i]) {
-                       bridge[i] = drm_panel_bridge_add_typed(panel[i],
-                                                              DRM_MODE_CONNECTOR_DPI);
-                       if (IS_ERR(bridge[i])) {
-                               DRM_ERROR("panel-bridge endpoint %d\n", i);
-                               ret = PTR_ERR(bridge[i]);
-                               goto err;
-                       }
-               }
-
-               if (bridge[i]) {
-                       ret = ltdc_encoder_init(ddev, bridge[i]);
-                       if (ret) {
-                               DRM_ERROR("init encoder endpoint %d\n", i);
-                               goto err;
-                       }
-               }
        }
 
        crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1340,8 +1339,8 @@ int ltdc_load(struct drm_device *ddev)
 
        return 0;
 err:
-       for (i = 0; i < MAX_ENDPOINTS; i++)
-               drm_panel_bridge_remove(bridge[i]);
+       for (i = 0; i < nb_endpoints; i++)
+               drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
 
        clk_disable_unprepare(ldev->pixel_clk);
 
@@ -1350,11 +1349,14 @@ err:
 
 void ltdc_unload(struct drm_device *ddev)
 {
-       int i;
+       struct device *dev = ddev->dev;
+       int nb_endpoints, i;
 
        DRM_DEBUG_DRIVER("\n");
 
-       for (i = 0; i < MAX_ENDPOINTS; i++)
+       nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+
+       for (i = 0; i < nb_endpoints; i++)
                drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
 
        pm_runtime_disable(ddev->dev);
index 68d4644ac2dcc32a2d4118d824f13eba5d284921..e324d7db7b7db6a698a3fdf6b338f3263ae59e90 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_backend.h"
 #include "sun4i_crtc.h"
@@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
        .mode_valid     = sun4i_hdmi_mode_valid,
 };
 
-static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static int sun4i_hdmi_get_modes(struct drm_connector *connector)
 {
        struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&hdmi->encoder,
                               &sun4i_hdmi_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &hdmi->encoder,
-                              &sun4i_hdmi_funcs,
-                              DRM_MODE_ENCODER_TMDS,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &hdmi->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
        if (ret) {
                dev_err(dev, "Couldn't initialise the HDMI encoder\n");
                goto err_put_ddc_i2c;
index 26e5c7ceb8ffd25110b4831a715ee71fa6ea29c6..ffda3184aa12ab2dccfec07214695207f57b2380 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
        .enable         = sun4i_lvds_encoder_enable,
 };
 
-static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 {
        struct drm_encoder *encoder;
@@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 
        drm_encoder_helper_add(&lvds->encoder,
                               &sun4i_lvds_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &lvds->encoder,
-                              &sun4i_lvds_enc_funcs,
-                              DRM_MODE_ENCODER_LVDS,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &lvds->encoder,
+                                     DRM_MODE_ENCODER_LVDS);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
                goto err_out;
index 3b23d5be3cf3b6734d0ceb2180ad0d358dcb9fca..5a7d43939ae67819742ceec29e189084749f3a3a 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
        .mode_valid     = sun4i_rgb_mode_valid,
 };
 
-static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
-       .destroy        = sun4i_rgb_enc_destroy,
-};
-
 int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 {
        struct drm_encoder *encoder;
@@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 
        drm_encoder_helper_add(&rgb->encoder,
                               &sun4i_rgb_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &rgb->encoder,
-                              &sun4i_rgb_enc_funcs,
-                              DRM_MODE_ENCODER_NONE,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &rgb->encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret) {
                dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
                goto err_out;
index 624437b27cdc48db078a54ae3a18b87633d8e373..359b56e43b83c564e48e062c2f12a1524f20485f 100644 (file)
@@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev,
        int irq, ret;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
                               dev_name(dev), tcon);
index 39c15282e4489ef4df03ae406a7bcf1cc8472849..63f4428ac3bf786e31f85db704921360276d6b57 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
@@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
        .mode_set       = sun4i_tv_mode_set,
 };
 
-static void sun4i_tv_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_tv_funcs = {
-       .destroy        = sun4i_tv_destroy,
-};
-
 static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
 {
        int i;
@@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&tv->encoder,
                               &sun4i_tv_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &tv->encoder,
-                              &sun4i_tv_funcs,
-                              DRM_MODE_ENCODER_TVDAC,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &tv->encoder,
+                                     DRM_MODE_ENCODER_TVDAC);
        if (ret) {
                dev_err(dev, "Couldn't initialise the TV encoder\n");
                goto err_disable_clk;
index 3eb89f1eb0e1ad44bfde8cb12359111ef5ad68be..aa67cb037e9d10a417e932ea9abc7fca68fdb30f 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_tcon.h"
@@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
        .enable         = sun6i_dsi_encoder_enable,
 };
 
-static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
                                       const struct mipi_dsi_msg *msg)
 {
@@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
 
        drm_encoder_helper_add(&dsi->encoder,
                               &sun6i_dsi_enc_helper_funcs);
-       ret = drm_encoder_init(drm,
-                              &dsi->encoder,
-                              &sun6i_dsi_enc_funcs,
-                              DRM_MODE_ENCODER_DSI,
-                              NULL);
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
        if (ret) {
                dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
                return ret;
index e8a317d5ba194442134ef57f967ab5bf8dfd3549..972682bb8000919df20765945d09adf3e61b2b2b 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "sun8i_dw_hdmi.h"
 #include "sun8i_tcon_top.h"
@@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = {
        .mode_set = sun8i_dw_hdmi_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
                              const struct drm_display_mode *mode)
@@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
        }
 
        drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
-       drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
        sun8i_hdmi_phy_init(hdmi->phy);
 
index 4a64f7ae437a8e13dc9510ae4e1264a87c4d9295..56cc037fd31288e1fb2364e3b1ff001106eec2c4 100644 (file)
 #include "sun8i_vi_layer.h"
 #include "sunxi_engine.h"
 
+struct de2_fmt_info {
+       u32     drm_fmt;
+       u32     de2_fmt;
+};
+
 static const struct de2_fmt_info de2_formats[] = {
        {
                .drm_fmt = DRM_FORMAT_ARGB8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_XRGB8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_XBGR8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBX8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRX8888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGB888,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGR888,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGB565,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGR565,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XRGB4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XBGR4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_RGBX4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_BGRX4444,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XRGB1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_XBGR1555,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_RGBX5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                /* for DE2 VI layer which ignores alpha */
                .drm_fmt = DRM_FORMAT_BGRX5551,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ARGB2101010,
                .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_ABGR2101010,
                .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_RGBA1010102,
                .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_BGRA1010102,
                .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
-               .rgb = true,
-               .csc = SUN8I_CSC_MODE_OFF,
        },
        {
                .drm_fmt = DRM_FORMAT_UYVY,
                .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_VYUY,
                .de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUYV,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVYU,
                .de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV16,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV16,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV61,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV61,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV12,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV12,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_NV21,
                .de2_fmt = SUN8I_MIXER_FBFMT_NV21,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV422,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV420,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YUV411,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU422,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU420,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_YVU411,
                .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YVU2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_P010,
                .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
        {
                .drm_fmt = DRM_FORMAT_P210,
                .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
-               .rgb = false,
-               .csc = SUN8I_CSC_MODE_YUV2RGB,
        },
 };
 
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
-               if (de2_formats[i].drm_fmt == format)
-                       return &de2_formats[i];
+               if (de2_formats[i].drm_fmt == format) {
+                       *hw_format = de2_formats[i].de2_fmt;
+                       return 0;
+               }
 
-       return NULL;
+       return -EINVAL;
 }
 
 static void sun8i_mixer_commit(struct sunxi_engine *engine)
index 345b28b0a80a1b8abe27870831d74b3e25f61256..7576b523fdbb1409c720ca5874425a71be9cc411 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
-#include "sun8i_csc.h"
 #include "sunxi_engine.h"
 
 #define SUN8I_MIXER_SIZE(w, h)                 (((h) - 1) << 16 | ((w) - 1))
 #define SUN50I_MIXER_CDC0_EN                   0xd0000
 #define SUN50I_MIXER_CDC1_EN                   0xd8000
 
-struct de2_fmt_info {
-       u32                     drm_fmt;
-       u32                     de2_fmt;
-       bool                    rgb;
-       enum sun8i_csc_mode     csc;
-};
-
 /**
  * struct sun8i_mixer_cfg - mixer HW configuration
  * @vi_num: number of VI channels
@@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
                return DE2_CH_BASE + channel * DE2_CH_SIZE;
 }
 
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
 #endif /* _SUN8I_MIXER_H_ */
index c87fd842918e58c10e74ecf2142e18781a34fb8f..54f937a7d5e7aa5d58faaf257afac094f0353f2e 100644 (file)
@@ -19,8 +19,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
-#include "sun8i_ui_layer.h"
 #include "sun8i_mixer.h"
+#include "sun8i_ui_layer.h"
 #include "sun8i_ui_scaler.h"
 
 static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                                         int overlay, struct drm_plane *plane)
 {
        struct drm_plane_state *state = plane->state;
-       const struct de2_fmt_info *fmt_info;
-       u32 val, ch_base;
+       const struct drm_format_info *fmt;
+       u32 val, ch_base, hw_fmt;
+       int ret;
 
        ch_base = sun8i_channel_base(mixer, channel);
 
-       fmt_info = sun8i_mixer_format_info(state->fb->format->format);
-       if (!fmt_info || !fmt_info->rgb) {
+       fmt = state->fb->format;
+       ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+       if (ret || fmt->is_yuv) {
                DRM_DEBUG_DRIVER("Invalid format\n");
                return -EINVAL;
        }
 
-       val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+       val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
        regmap_update_bits(mixer->engine.regs,
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
index b8398ca18b0fd20027f136de12048cb7a094707d..22c8c5375d0db8d0c53f96a6c56092914497cac4 100644 (file)
@@ -12,8 +12,9 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
-#include "sun8i_vi_layer.h"
+#include "sun8i_csc.h"
 #include "sun8i_mixer.h"
+#include "sun8i_vi_layer.h"
 #include "sun8i_vi_scaler.h"
 
 static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
        return 0;
 }
 
+/*
+ * Map a DRM format to the mixer's colour-space-conversion mode.
+ *
+ * Must return u32 (not bool): the SUN8I_CSC_MODE_* values are distinct
+ * enum constants consumed by sun8i_csc_set_ccsc_coefficients() and
+ * compared against SUN8I_CSC_MODE_OFF by the caller — truncating them
+ * to bool would collapse YUV2RGB and YVU2RGB into the same value.
+ */
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+{
+       /* RGB formats need no conversion. */
+       if (!format->is_yuv)
+               return SUN8I_CSC_MODE_OFF;
+
+       switch (format->format) {
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YVU444:
+               /* Planar formats with swapped U/V planes. */
+               return SUN8I_CSC_MODE_YVU2RGB;
+       default:
+               return SUN8I_CSC_MODE_YUV2RGB;
+       }
+}
+
 static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                                         int overlay, struct drm_plane *plane)
 {
        struct drm_plane_state *state = plane->state;
-       const struct de2_fmt_info *fmt_info;
-       u32 val, ch_base;
+       u32 val, ch_base, csc_mode, hw_fmt;
+       const struct drm_format_info *fmt;
+       int ret;
 
        ch_base = sun8i_channel_base(mixer, channel);
 
-       fmt_info = sun8i_mixer_format_info(state->fb->format->format);
-       if (!fmt_info) {
+       fmt = state->fb->format;
+       ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+       if (ret) {
                DRM_DEBUG_DRIVER("Invalid format\n");
-               return -EINVAL;
+               return ret;
        }
 
-       val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+       val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
        regmap_update_bits(mixer->engine.regs,
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
 
-       if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
-               sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+       csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
+       if (csc_mode != SUN8I_CSC_MODE_OFF) {
+               sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
                                                state->color_encoding,
                                                state->color_range);
                sun8i_csc_enable_ccsc(mixer, channel, true);
@@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
                sun8i_csc_enable_ccsc(mixer, channel, false);
        }
 
-       if (fmt_info->rgb)
+       if (!fmt->is_yuv)
                val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
        else
                val = 0;
index 1a7b08f35776826042ee1337d9df350d2d7d275f..83f31c6e891c6403807d50f430da5d2e7ce9a1df 100644 (file)
@@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
        struct drm_minor *minor = crtc->dev->primary;
        struct dentry *root;
        struct tegra_dc *dc = to_tegra_dc(crtc);
-       int err;
 
 #ifdef CONFIG_DEBUG_FS
        root = crtc->debugfs_entry;
@@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
        for (i = 0; i < count; i++)
                dc->debugfs_files[i].data = dc;
 
-       err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(dc->debugfs_files);
-       dc->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_dc_early_unregister(struct drm_crtc *crtc)
index 7dfb50f65067f291544f06f011f5cfa647193f1b..105fb9cdbb3bdc6cd05acc0bbf1083f23234a26a 100644 (file)
@@ -5,12 +5,10 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
index 583cd6e0ae27faa454f32243de79b7a5d703ac8f..211906347f3f98f73e2b0a2e5a573a355de3150a 100644 (file)
@@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = {
        { "iova", tegra_debugfs_iova, 0 },
 };
 
-static int tegra_debugfs_init(struct drm_minor *minor)
+static void tegra_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(tegra_debugfs_list,
-                                       ARRAY_SIZE(tegra_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tegra_debugfs_list,
+                                ARRAY_SIZE(tegra_debugfs_list),
+                                minor->debugfs_root, minor);
 }
 #endif
 
index ed99b67deb292c15e0deb5b59216bd00c7ceadfb..b25443255be6beb1995c675fc377d0b1152f1968 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/host1x.h>
 #include <linux/iova.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_edid.h>
@@ -152,8 +152,6 @@ enum drm_connector_status
 tegra_output_connector_detect(struct drm_connector *connector, bool force);
 void tegra_output_connector_destroy(struct drm_connector *connector);
 
-void tegra_output_encoder_destroy(struct drm_encoder *encoder);
-
 /* from dpaux.c */
 struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
 enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
index 88b9d64c77bf2ee71528ff65ca857b4e6cc3b652..38beab9ab4f8c0921f2c3840b38a4528bbdf27ce 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dc.h"
 #include "drm.h"
@@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_dsi *dsi = to_dsi(output);
-       int err;
 
        dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                     GFP_KERNEL);
@@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                dsi->debugfs_files[i].data = dsi;
 
-       err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(dsi->debugfs_files);
-       dsi->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_dsi_early_unregister(struct drm_connector *connector)
@@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs
        .mode_valid = tegra_dsi_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
 {
        int err;
@@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client)
                                         &tegra_dsi_connector_helper_funcs);
                dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-               drm_encoder_init(drm, &dsi->output.encoder,
-                                &tegra_dsi_encoder_funcs,
-                                DRM_MODE_ENCODER_DSI, NULL);
+               drm_simple_encoder_init(drm, &dsi->output.encoder,
+                                       DRM_MODE_ENCODER_DSI);
                drm_encoder_helper_add(&dsi->output.encoder,
                                       &tegra_dsi_encoder_helper_funcs);
 
index b8a328f538626e7a2b833a9f4aac60c70ee6077d..2b0666ac681b87214cc0dbcc57b0ee22a5e94dcb 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
  *
  * Based on the KMS/FB CMA helpers
- *   Copyright (C) 2012 Analog Device Inc.
+ *   Copyright (C) 2012 Analog Devices Inc.
  */
 
 #include <linux/console.h>
index 38252c0f068dff704928a34435f06f7374697b5e..d09a24931c87cbb8499490fdfba287401d0fbf04 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/hdmi.h>
 #include <linux/math64.h>
 #include <linux/module.h>
@@ -22,6 +21,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "hda.h"
 #include "hdmi.h"
@@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_hdmi *hdmi = to_hdmi(output);
-       int err;
 
        hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                      GFP_KERNEL);
@@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                hdmi->debugfs_files[i].data = hdmi;
 
-       err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(hdmi->debugfs_files);
-       hdmi->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_hdmi_early_unregister(struct drm_connector *connector)
@@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = {
        .mode_valid = tegra_hdmi_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
        struct tegra_output *output = encoder_to_output(encoder);
@@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client)
                                 &tegra_hdmi_connector_helper_funcs);
        hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, &hdmi->output.encoder,
+                               DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&hdmi->output.encoder,
                               &tegra_hdmi_encoder_helper_funcs);
 
index a264259b97a26b85fb7fd1d203e9ab93a68bd2f5..e36e5e7c2f694dfe627b1c720d959509041735ce 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector)
        drm_connector_cleanup(connector);
 }
 
-void tegra_output_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
 static irqreturn_t hpd_irq(int irq, void *data)
 {
        struct tegra_output *output = data;
index 4be4dfd4a68a3e31bd085900eccd46541459b08b..0562a7eb793f31c6bb0e4dc9c0ae35efe1d2f5f3 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs
        .mode_valid = tegra_rgb_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
        struct tegra_output *output = encoder_to_output(encoder);
@@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
                                 &tegra_rgb_connector_helper_funcs);
        output->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+       drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
        drm_encoder_helper_add(&output->encoder,
                               &tegra_rgb_encoder_helper_funcs);
 
index 81226a4953c157c68b91753416235e6f3be933b4..7cbcf9617f5e6b2dcc31fb1e59cab96c49e2c342 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/debugfs.h>
-#include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -23,6 +22,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_scdc_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "dc.h"
 #include "dp.h"
@@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector)
        struct drm_minor *minor = connector->dev->primary;
        struct dentry *root = connector->debugfs_entry;
        struct tegra_sor *sor = to_sor(output);
-       int err;
 
        sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
                                     GFP_KERNEL);
@@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector)
        for (i = 0; i < count; i++)
                sor->debugfs_files[i].data = sor;
 
-       err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
-       if (err < 0)
-               goto free;
+       drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
 
        return 0;
-
-free:
-       kfree(sor->debugfs_files);
-       sor->debugfs_files = NULL;
-
-       return err;
 }
 
 static void tegra_sor_early_unregister(struct drm_connector *connector)
@@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs
        .mode_valid = tegra_sor_connector_mode_valid,
 };
 
-static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
-       .destroy = tegra_output_encoder_destroy,
-};
-
 static int
 tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_crtc_state *crtc_state,
@@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client)
                                 &tegra_sor_connector_helper_funcs);
        sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
-                        encoder, NULL);
+       drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
        drm_encoder_helper_add(&sor->output.encoder, helpers);
 
        drm_connector_attach_encoder(&sor->output.connector,
index 3221a707e07366c68c03e2945a72b7ddd5298c20..89a226912de85f425404ec90d713e839fbd575e5 100644 (file)
@@ -24,7 +24,7 @@
 static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
 {
        struct drm_device *ddev = tcrtc->crtc.dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct drm_pending_vblank_event *event;
        unsigned long flags;
        bool busy;
@@ -88,7 +88,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *state)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct dispc_device *dispc = tidss->dispc;
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
@@ -165,7 +165,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
 
        dev_dbg(ddev->dev,
@@ -216,7 +216,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        unsigned long flags;
        int r;
@@ -259,7 +259,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
 
        dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
@@ -295,7 +295,7 @@ enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
 {
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
 }
@@ -314,7 +314,7 @@ static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
 static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
@@ -328,7 +328,7 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
 static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
index 29f42768e294738904d6fd09df3d901c795a002e..629dd06393f68051187a1d3abdb1d1e5081b7763 100644 (file)
@@ -181,10 +181,6 @@ const struct dispc_features dispc_am65x_feats = {
        .vid_name = { "vid", "vidl1" },
        .vid_lite = { false, true, },
        .vid_order = { 1, 0 },
-
-       .errata = {
-               .i2000 = true,
-       },
 };
 
 static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -2674,12 +2670,9 @@ int dispc_init(struct tidss_device *tidss)
                return -ENOMEM;
 
        num_fourccs = 0;
-       for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
-               if (feat->errata.i2000 &&
-                   dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
-                       continue;
+       for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
                dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
-       }
+
        dispc->num_fourccs = num_fourccs;
        dispc->tidss = tidss;
        dispc->dev = dev;
index a4a68249e44b8da17eccd21f08409cea4adb1009..902e612ff7acd9047f228ef88eedcdbb79dfb9e5 100644 (file)
@@ -46,10 +46,6 @@ struct dispc_features_scaling {
        u32 xinc_max;
 };
 
-struct dispc_errata {
-       bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
-};
-
 enum dispc_vp_bus_type {
        DISPC_VP_DPI,           /* DPI output */
        DISPC_VP_OLDI,          /* OLDI (LVDS) output */
@@ -83,8 +79,6 @@ struct dispc_features {
        const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
        bool vid_lite[TIDSS_MAX_PLANES];
        u32 vid_order[TIDSS_MAX_PLANES];
-
-       struct dispc_errata errata;
 };
 
 extern const struct dispc_features dispc_k2g_feats;
index d95e4be2c7b9f7f35b036bf1bcc7e813c0da6410..99edc66ebdef29b0a7e1f7bf2d203861a96bfd76 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 
 #include "tidss_dispc.h"
@@ -102,15 +103,7 @@ static const struct dev_pm_ops tidss_pm_ops = {
 
 static void tidss_release(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
-
        drm_kms_helper_poll_fini(ddev);
-
-       tidss_modeset_cleanup(tidss);
-
-       drm_dev_fini(ddev);
-
-       kfree(tidss);
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
@@ -142,26 +135,18 @@ static int tidss_probe(struct platform_device *pdev)
 
        dev_dbg(dev, "%s\n", __func__);
 
-       /* Can't use devm_* since drm_device's lifetime may exceed dev's */
-       tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
-       if (!tidss)
-               return -ENOMEM;
+       tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
+                                  struct tidss_device, ddev);
+       if (IS_ERR(tidss))
+               return PTR_ERR(tidss);
 
        ddev = &tidss->ddev;
 
-       ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
-       if (ret) {
-               kfree(ddev);
-               return ret;
-       }
-
        tidss->dev = dev;
        tidss->feat = of_device_get_match_data(dev);
 
        platform_set_drvdata(pdev, tidss);
 
-       ddev->dev_private = tidss;
-
        ret = dispc_init(tidss);
        if (ret) {
                dev_err(dev, "failed to initialize dispc: %d\n", ret);
index e2aa6436ad1833ff073f1a26a9699730a5f77693..3b0a3d87b7c4c2779c57212cbb254f521082fc10 100644 (file)
@@ -29,10 +29,10 @@ struct tidss_device {
 
        spinlock_t wait_lock;   /* protects the irq masks */
        dispc_irq_t irq_mask;   /* enabled irqs in addition to wait_list */
-
-       struct drm_atomic_state *saved_state;
 };
 
+#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
+
 int tidss_runtime_get(struct tidss_device *tidss);
 void tidss_runtime_put(struct tidss_device *tidss);
 
index 612c046738e5f3dc6d86d5b15260a424fca380d4..1b80f2d62e0aeaa098daf4835c793f4a86156fa7 100644 (file)
@@ -23,7 +23,7 @@ static void tidss_irq_update(struct tidss_device *tidss)
 void tidss_irq_enable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
        unsigned long flags;
@@ -38,7 +38,7 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
 void tidss_irq_disable_vblank(struct drm_crtc *crtc)
 {
        struct drm_device *ddev = crtc->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
        u32 hw_videoport = tcrtc->hw_videoport;
        unsigned long flags;
@@ -53,7 +53,7 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
 irqreturn_t tidss_irq_handler(int irq, void *arg)
 {
        struct drm_device *ddev = (struct drm_device *)arg;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned int id;
        dispc_irq_t irqstatus;
 
@@ -95,7 +95,7 @@ void tidss_irq_resume(struct tidss_device *tidss)
 
 void tidss_irq_preinstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        spin_lock_init(&tidss->wait_lock);
 
@@ -109,7 +109,7 @@ void tidss_irq_preinstall(struct drm_device *ddev)
 
 int tidss_irq_postinstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        unsigned long flags;
        unsigned int i;
 
@@ -138,7 +138,7 @@ int tidss_irq_postinstall(struct drm_device *ddev)
 
 void tidss_irq_uninstall(struct drm_device *ddev)
 {
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        tidss_runtime_get(tidss);
        dispc_set_irqenable(tidss->dispc, 0);
index 7d419960b030934fc39a205c71113478abc38a9a..4b99e9fa84a5bf801920b6ca6863346557c260e3 100644 (file)
@@ -25,7 +25,7 @@
 static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
        struct drm_device *ddev = old_state->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
 
@@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
 
        dev_dbg(tidss->dev, "%s\n", __func__);
 
-       drm_mode_config_init(ddev);
+       ret = drmm_mode_config_init(ddev);
+       if (ret)
+               return ret;
 
        ddev->mode_config.min_width = 8;
        ddev->mode_config.min_height = 8;
@@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss)
 
        ret = tidss_dispc_modeset_init(tidss);
        if (ret)
-               goto err_mode_config_cleanup;
+               return ret;
 
        ret = drm_vblank_init(ddev, tidss->num_crtcs);
        if (ret)
-               goto err_mode_config_cleanup;
+               return ret;
 
        /* Start with vertical blanking interrupt reporting disabled. */
        for (i = 0; i < tidss->num_crtcs; ++i)
@@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss)
        dev_dbg(tidss->dev, "%s done\n", __func__);
 
        return 0;
-
-err_mode_config_cleanup:
-       drm_mode_config_cleanup(ddev);
-       return ret;
-}
-
-void tidss_modeset_cleanup(struct tidss_device *tidss)
-{
-       struct drm_device *ddev = &tidss->ddev;
-
-       drm_mode_config_cleanup(ddev);
 }
index dda5625d01283d034870c1b13643d03251e08b7f..99aaff099f22980a1c384fab3fcfe01ca097caf4 100644 (file)
@@ -10,6 +10,5 @@
 struct tidss_device;
 
 int tidss_modeset_init(struct tidss_device *tidss);
-void tidss_modeset_cleanup(struct tidss_device *tidss);
 
 #endif
index 798488948fc539716bfe53a5316583b1360ee5ee..0a563eabcbb920cc15bbbb836816318ed80ed970 100644 (file)
@@ -22,7 +22,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
        const struct drm_format_info *finfo;
        struct drm_crtc_state *crtc_state;
@@ -101,7 +101,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
        struct drm_plane_state *state = plane->state;
        u32 hw_videoport;
@@ -133,7 +133,7 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
 {
        struct drm_device *ddev = plane->dev;
-       struct tidss_device *tidss = ddev->dev_private;
+       struct tidss_device *tidss = to_tidss(ddev);
        struct tidss_plane *tplane = to_tidss_plane(plane);
 
        dev_dbg(ddev->dev, "%s\n", __func__);
index 0791a0200cc3c5690d234c612ec6e0fcf6f04d2d..a5e9ee4c7fbf4203477e7283c5aea128d2240b7f 100644 (file)
@@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
        ret = drm_dev_register(ddev, 0);
        if (ret)
                goto init_failed;
+       priv->is_registered = true;
 
        drm_fbdev_generic_setup(ddev, bpp);
-
-       priv->is_registered = true;
        return 0;
 
 init_failed:
@@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = {
                { "mm",   tilcdc_mm_show,   0 },
 };
 
-static int tilcdc_debugfs_init(struct drm_minor *minor)
+static void tilcdc_debugfs_init(struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
        struct tilcdc_module *mod;
-       int ret;
 
-       ret = drm_debugfs_create_files(tilcdc_debugfs_list,
-                       ARRAY_SIZE(tilcdc_debugfs_list),
-                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(tilcdc_debugfs_list,
+                                ARRAY_SIZE(tilcdc_debugfs_list),
+                                minor->debugfs_root, minor);
 
        list_for_each_entry(mod, &module_list, list)
                if (mod->funcs->debugfs_init)
                        mod->funcs->debugfs_init(mod, minor);
-
-       if (ret) {
-               dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
-               return ret;
-       }
-
-       return ret;
 }
 #endif
 
index 28b7f703236e88401acf4783eca50f7ea08ddb20..b177525588c14ac6c96562e4f6b40ac5975e6143 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_external.h"
@@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
        return 0;
 }
 
-static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
-       .destroy        = drm_encoder_cleanup,
-};
-
 static
 int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
 {
@@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
        if (!priv->external_encoder)
                return -ENOMEM;
 
-       ret = drm_encoder_init(ddev, priv->external_encoder,
-                              &tilcdc_external_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
+       ret = drm_simple_encoder_init(ddev, priv->external_encoder,
+                                     DRM_MODE_ENCODER_NONE);
        if (ret) {
                dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
                return ret;
index 5584e656b8575822f9c76cb51ab2156cef0c904c..12823d60c4e89542f38dd61139ff7ba7523b944b 100644 (file)
@@ -16,6 +16,7 @@
 #include <drm/drm_connector.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_panel.h"
@@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
        /* nothing needed */
 }
 
-static const struct drm_encoder_funcs panel_encoder_funcs = {
-               .destroy        = drm_encoder_cleanup,
-};
-
 static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
                .dpms           = panel_encoder_dpms,
                .prepare        = panel_encoder_prepare,
@@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
        encoder = &panel_encoder->base;
        encoder->possible_crtcs = 1;
 
-       ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
-                       DRM_MODE_ENCODER_LVDS, NULL);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
        if (ret < 0)
                goto fail;
 
index 4160e74e4751d8908a6e84ef0a9e3f56fc364eec..2b6414f0fa75957e0ce70065625993cc81cd2ff6 100644 (file)
@@ -1,5 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+config DRM_CIRRUS_QEMU
+       tristate "Cirrus driver for QEMU emulated device"
+       depends on DRM && PCI && MMU
+       select DRM_KMS_HELPER
+       select DRM_GEM_SHMEM_HELPER
+       help
+        This is a KMS driver for emulated cirrus device in qemu.
+        It is *NOT* intended for real cirrus devices. This requires
+        the modesetting userspace X.org driver.
+
+        Cirrus is obsolete, the hardware was designed in the 90ies
+        and can't keep up with todays needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
+
 config DRM_GM12U320
        tristate "GM12U320 driver for USB projectors"
        depends on DRM && USB
index c96ceee71453b3b56a4850922023c2cef15d8eae..6ae4e9e5a35fbd883dda1777fd4941f27bef3dd8 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+obj-$(CONFIG_DRM_CIRRUS_QEMU)          += cirrus.o
 obj-$(CONFIG_DRM_GM12U320)             += gm12u320.o
 obj-$(CONFIG_TINYDRM_HX8357D)          += hx8357d.o
 obj-$(CONFIG_TINYDRM_ILI9225)          += ili9225.o
similarity index 91%
rename from drivers/gpu/drm/cirrus/cirrus.c
rename to drivers/gpu/drm/tiny/cirrus.c
index d2ff63ce8eaff3019b415cb668903800a977c6fd..744a8e337e41e9f314fd82a18377cbdd73e4fe16 100644 (file)
@@ -35,6 +35,7 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -58,6 +59,8 @@ struct cirrus_device {
        void __iomem                   *mmio;
 };
 
+#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+
 /* ------------------------------------------------------------------ */
 /*
  * The meat of this driver. The core passes us a mode and we have to program
@@ -310,7 +313,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
 static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
                               struct drm_rect *rect)
 {
-       struct cirrus_device *cirrus = fb->dev->dev_private;
+       struct cirrus_device *cirrus = to_cirrus(fb->dev);
        void *vmap;
        int idx, ret;
 
@@ -435,7 +438,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
                               struct drm_crtc_state *crtc_state,
                               struct drm_plane_state *plane_state)
 {
-       struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+       struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
 
        cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
        cirrus_fb_blit_fullscreen(plane_state->fb);
@@ -444,7 +447,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
 static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
                               struct drm_plane_state *old_state)
 {
-       struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+       struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
        struct drm_plane_state *state = pipe->plane.state;
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_rect rect;
@@ -509,11 +512,15 @@ static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
-static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+static int cirrus_mode_config_init(struct cirrus_device *cirrus)
 {
        struct drm_device *dev = &cirrus->dev;
+       int ret;
+
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
 
-       drm_mode_config_init(dev);
        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
@@ -521,18 +528,12 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
        dev->mode_config.preferred_depth = 16;
        dev->mode_config.prefer_shadow = 0;
        dev->mode_config.funcs = &cirrus_mode_config_funcs;
+
+       return 0;
 }
 
 /* ------------------------------------------------------------------ */
 
-static void cirrus_release(struct drm_device *dev)
-{
-       struct cirrus_device *cirrus = dev->dev_private;
-
-       drm_mode_config_cleanup(dev);
-       kfree(cirrus);
-}
-
 DEFINE_DRM_GEM_FOPS(cirrus_fops);
 
 static struct drm_driver cirrus_driver = {
@@ -546,7 +547,6 @@ static struct drm_driver cirrus_driver = {
 
        .fops            = &cirrus_fops,
        DRM_GEM_SHMEM_DRIVER_OPS,
-       .release         = cirrus_release,
 };
 
 static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -560,7 +560,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
        if (ret)
                return ret;
 
-       ret = pci_enable_device(pdev);
+       ret = pcim_enable_device(pdev);
        if (ret)
                return ret;
 
@@ -569,36 +569,34 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
                return ret;
 
        ret = -ENOMEM;
-       cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
-       if (cirrus == NULL)
-               goto err_pci_release;
+       cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
+                                   struct cirrus_device, dev);
+       if (IS_ERR(cirrus))
+               return PTR_ERR(cirrus);
 
        dev = &cirrus->dev;
-       ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
-       if (ret)
-               goto err_free_cirrus;
-       dev->dev_private = cirrus;
 
-       ret = -ENOMEM;
-       cirrus->vram = ioremap(pci_resource_start(pdev, 0),
-                              pci_resource_len(pdev, 0));
+       cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+                                   pci_resource_len(pdev, 0));
        if (cirrus->vram == NULL)
-               goto err_dev_put;
+               return -ENOMEM;
 
-       cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
-                              pci_resource_len(pdev, 1));
+       cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
+                                   pci_resource_len(pdev, 1));
        if (cirrus->mmio == NULL)
-               goto err_unmap_vram;
+               return -ENOMEM;
 
-       cirrus_mode_config_init(cirrus);
+       ret = cirrus_mode_config_init(cirrus);
+       if (ret)
+               return ret;
 
        ret = cirrus_conn_init(cirrus);
        if (ret < 0)
-               goto err_cleanup;
+               return ret;
 
        ret = cirrus_pipe_init(cirrus);
        if (ret < 0)
-               goto err_cleanup;
+               return ret;
 
        drm_mode_config_reset(dev);
 
@@ -606,36 +604,18 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, dev);
        ret = drm_dev_register(dev, 0);
        if (ret)
-               goto err_cleanup;
+               return ret;
 
        drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
        return 0;
-
-err_cleanup:
-       drm_mode_config_cleanup(dev);
-       iounmap(cirrus->mmio);
-err_unmap_vram:
-       iounmap(cirrus->vram);
-err_dev_put:
-       drm_dev_put(dev);
-err_free_cirrus:
-       kfree(cirrus);
-err_pci_release:
-       pci_release_regions(pdev);
-       return ret;
 }
 
 static void cirrus_pci_remove(struct pci_dev *pdev)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
-       struct cirrus_device *cirrus = dev->dev_private;
 
        drm_dev_unplug(dev);
        drm_atomic_helper_shutdown(dev);
-       iounmap(cirrus->mmio);
-       iounmap(cirrus->vram);
-       drm_dev_put(dev);
-       pci_release_regions(pdev);
 }
 
 static const struct pci_device_id pciidlist[] = {
index a48173441ae0db23de8f7d62f03c80ebe4cc4512..cc397671f6898851a73a04ebf9e802b84847fcb9 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -87,18 +88,18 @@ struct gm12u320_device {
        struct usb_device               *udev;
        unsigned char                   *cmd_buf;
        unsigned char                   *data_buf[GM12U320_BLOCK_COUNT];
-       bool                             pipe_enabled;
        struct {
-               bool                     run;
-               struct workqueue_struct *workq;
-               struct work_struct       work;
-               wait_queue_head_t        waitq;
+               struct delayed_work       work;
                struct mutex             lock;
                struct drm_framebuffer  *fb;
                struct drm_rect          rect;
+               int frame;
+               int draw_status_timeout;
        } fb_update;
 };
 
+#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
+
 static const char cmd_data[CMD_SIZE] = {
        0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
        0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
@@ -159,7 +160,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
        int i, block_size;
        const char *hdr;
 
-       gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+       gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
        if (!gm12u320->cmd_buf)
                return -ENOMEM;
 
@@ -172,7 +173,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
                        hdr = data_block_header;
                }
 
-               gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+               gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
+                                                    block_size, GFP_KERNEL);
                if (!gm12u320->data_buf[i])
                        return -ENOMEM;
 
@@ -182,26 +184,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
                       data_block_footer, DATA_BLOCK_FOOTER_SIZE);
        }
 
-       gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
-       if (!gm12u320->fb_update.workq)
-               return -ENOMEM;
-
        return 0;
 }
 
-static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
-{
-       int i;
-
-       if (gm12u320->fb_update.workq)
-               destroy_workqueue(gm12u320->fb_update.workq);
-
-       for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
-               kfree(gm12u320->data_buf[i]);
-
-       kfree(gm12u320->cmd_buf);
-}
-
 static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
                                 u8 req_a, u8 req_b,
                                 u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
@@ -344,80 +329,77 @@ unlock:
 static void gm12u320_fb_update_work(struct work_struct *work)
 {
        struct gm12u320_device *gm12u320 =
-               container_of(work, struct gm12u320_device, fb_update.work);
-       int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+               container_of(to_delayed_work(work), struct gm12u320_device,
+                            fb_update.work);
        int block, block_size, len;
-       int frame = 0;
        int ret = 0;
 
-       while (gm12u320->fb_update.run) {
-               gm12u320_copy_fb_to_blocks(gm12u320);
-
-               for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
-                       if (block == GM12U320_BLOCK_COUNT - 1)
-                               block_size = DATA_LAST_BLOCK_SIZE;
-                       else
-                               block_size = DATA_BLOCK_SIZE;
-
-                       /* Send data command to device */
-                       memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
-                       gm12u320->cmd_buf[8] = block_size & 0xff;
-                       gm12u320->cmd_buf[9] = block_size >> 8;
-                       gm12u320->cmd_buf[20] = 0xfc - block * 4;
-                       gm12u320->cmd_buf[21] = block | (frame << 7);
-
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                               gm12u320->cmd_buf, CMD_SIZE, &len,
-                               CMD_TIMEOUT);
-                       if (ret || len != CMD_SIZE)
-                               goto err;
-
-                       /* Send data block to device */
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                               gm12u320->data_buf[block], block_size,
-                               &len, DATA_TIMEOUT);
-                       if (ret || len != block_size)
-                               goto err;
-
-                       /* Read status */
-                       ret = usb_bulk_msg(gm12u320->udev,
-                               usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
-                               gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
-                               CMD_TIMEOUT);
-                       if (ret || len != READ_STATUS_SIZE)
-                               goto err;
-               }
+       gm12u320_copy_fb_to_blocks(gm12u320);
+
+       for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+               if (block == GM12U320_BLOCK_COUNT - 1)
+                       block_size = DATA_LAST_BLOCK_SIZE;
+               else
+                       block_size = DATA_BLOCK_SIZE;
+
+               /* Send data command to device */
+               memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+               gm12u320->cmd_buf[8] = block_size & 0xff;
+               gm12u320->cmd_buf[9] = block_size >> 8;
+               gm12u320->cmd_buf[20] = 0xfc - block * 4;
+               gm12u320->cmd_buf[21] =
+                       block | (gm12u320->fb_update.frame << 7);
 
-               /* Send draw command to device */
-               memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
                ret = usb_bulk_msg(gm12u320->udev,
                        usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
-                       gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+                       gm12u320->cmd_buf, CMD_SIZE, &len,
+                       CMD_TIMEOUT);
                if (ret || len != CMD_SIZE)
                        goto err;
 
+               /* Send data block to device */
+               ret = usb_bulk_msg(gm12u320->udev,
+                       usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+                       gm12u320->data_buf[block], block_size,
+                       &len, DATA_TIMEOUT);
+               if (ret || len != block_size)
+                       goto err;
+
                /* Read status */
                ret = usb_bulk_msg(gm12u320->udev,
                        usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
                        gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
-                       draw_status_timeout);
+                       CMD_TIMEOUT);
                if (ret || len != READ_STATUS_SIZE)
                        goto err;
-
-               draw_status_timeout = CMD_TIMEOUT;
-               frame = !frame;
-
-               /*
-                * We must draw a frame every 2s otherwise the projector
-                * switches back to showing its logo.
-                */
-               wait_event_timeout(gm12u320->fb_update.waitq,
-                                  !gm12u320->fb_update.run ||
-                                       gm12u320->fb_update.fb != NULL,
-                                  IDLE_TIMEOUT);
        }
+
+       /* Send draw command to device */
+       memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+       ret = usb_bulk_msg(gm12u320->udev,
+               usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+               gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+       if (ret || len != CMD_SIZE)
+               goto err;
+
+       /* Read status */
+       ret = usb_bulk_msg(gm12u320->udev,
+               usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+               gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+               gm12u320->fb_update.draw_status_timeout);
+       if (ret || len != READ_STATUS_SIZE)
+               goto err;
+
+       gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
+       gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
+
+       /*
+        * We must draw a frame every 2s otherwise the projector
+        * switches back to showing its logo.
+        */
+       queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+                          IDLE_TIMEOUT);
+
        return;
 err:
        /* Do not log errors caused by module unload or device unplug */
@@ -428,7 +410,7 @@ err:
 static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
                                   struct drm_rect *dirty)
 {
-       struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
        struct drm_framebuffer *old_fb = NULL;
        bool wakeup = false;
 
@@ -452,36 +434,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
        mutex_unlock(&gm12u320->fb_update.lock);
 
        if (wakeup)
-               wake_up(&gm12u320->fb_update.waitq);
+               mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
 
        if (old_fb)
                drm_framebuffer_put(old_fb);
 }
 
-static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
-{
-       mutex_lock(&gm12u320->fb_update.lock);
-       gm12u320->fb_update.run = true;
-       mutex_unlock(&gm12u320->fb_update.lock);
-
-       queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
-}
-
 static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
 {
-       mutex_lock(&gm12u320->fb_update.lock);
-       gm12u320->fb_update.run = false;
-       mutex_unlock(&gm12u320->fb_update.lock);
+       struct drm_framebuffer *old_fb;
 
-       wake_up(&gm12u320->fb_update.waitq);
-       cancel_work_sync(&gm12u320->fb_update.work);
+       cancel_delayed_work_sync(&gm12u320->fb_update.work);
 
        mutex_lock(&gm12u320->fb_update.lock);
-       if (gm12u320->fb_update.fb) {
-               drm_framebuffer_put(gm12u320->fb_update.fb);
-               gm12u320->fb_update.fb = NULL;
-       }
+       old_fb = gm12u320->fb_update.fb;
+       gm12u320->fb_update.fb = NULL;
        mutex_unlock(&gm12u320->fb_update.lock);
+
+       drm_framebuffer_put(old_fb);
 }
 
 static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
@@ -589,20 +559,18 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
                                 struct drm_crtc_state *crtc_state,
                                 struct drm_plane_state *plane_state)
 {
-       struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
        struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+       struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
 
+       gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
        gm12u320_fb_mark_dirty(plane_state->fb, &rect);
-       gm12u320_start_fb_update(gm12u320);
-       gm12u320->pipe_enabled = true;
 }
 
 static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-       struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
 
        gm12u320_stop_fb_update(gm12u320);
-       gm12u320->pipe_enabled = false;
 }
 
 static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -630,16 +598,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static void gm12u320_driver_release(struct drm_device *dev)
-{
-       struct gm12u320_device *gm12u320 = dev->dev_private;
-
-       gm12u320_usb_free(gm12u320);
-       drm_mode_config_cleanup(dev);
-       drm_dev_fini(dev);
-       kfree(gm12u320);
-}
-
 DEFINE_DRM_GEM_FOPS(gm12u320_fops);
 
 static struct drm_driver gm12u320_drm_driver = {
@@ -651,7 +609,6 @@ static struct drm_driver gm12u320_drm_driver = {
        .major           = DRIVER_MAJOR,
        .minor           = DRIVER_MINOR,
 
-       .release         = gm12u320_driver_release,
        .fops            = &gm12u320_fops,
        DRM_GEM_SHMEM_DRIVER_OPS,
 };
@@ -676,24 +633,21 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
        if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
-       if (gm12u320 == NULL)
-               return -ENOMEM;
+       gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
+                                     struct gm12u320_device, dev);
+       if (IS_ERR(gm12u320))
+               return PTR_ERR(gm12u320);
 
        gm12u320->udev = interface_to_usbdev(interface);
-       INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+       INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
        mutex_init(&gm12u320->fb_update.lock);
-       init_waitqueue_head(&gm12u320->fb_update.waitq);
 
        dev = &gm12u320->dev;
-       ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
-       if (ret) {
-               kfree(gm12u320);
+
+       ret = drmm_mode_config_init(dev);
+       if (ret)
                return ret;
-       }
-       dev->dev_private = gm12u320;
 
-       drm_mode_config_init(dev);
        dev->mode_config.min_width = GM12U320_USER_WIDTH;
        dev->mode_config.max_width = GM12U320_USER_WIDTH;
        dev->mode_config.min_height = GM12U320_HEIGHT;
@@ -702,15 +656,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
 
        ret = gm12u320_usb_alloc(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = gm12u320_set_ecomode(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = gm12u320_conn_init(gm12u320);
        if (ret)
-               goto err_put;
+               return ret;
 
        ret = drm_simple_display_pipe_init(&gm12u320->dev,
                                           &gm12u320->pipe,
@@ -720,56 +674,44 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
                                           gm12u320_pipe_modifiers,
                                           &gm12u320->conn);
        if (ret)
-               goto err_put;
+               return ret;
 
        drm_mode_config_reset(dev);
 
        usb_set_intfdata(interface, dev);
        ret = drm_dev_register(dev, 0);
        if (ret)
-               goto err_put;
+               return ret;
 
        drm_fbdev_generic_setup(dev, 0);
 
        return 0;
-
-err_put:
-       drm_dev_put(dev);
-       return ret;
 }
 
 static void gm12u320_usb_disconnect(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
 
-       gm12u320_stop_fb_update(gm12u320);
        drm_dev_unplug(dev);
-       drm_dev_put(dev);
+       drm_atomic_helper_shutdown(dev);
 }
 
 static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
                                           pm_message_t message)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
 
-       if (gm12u320->pipe_enabled)
-               gm12u320_stop_fb_update(gm12u320);
-
-       return 0;
+       return drm_mode_config_helper_suspend(dev);
 }
 
 static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
-       struct gm12u320_device *gm12u320 = dev->dev_private;
+       struct gm12u320_device *gm12u320 = to_gm12u320(dev);
 
        gm12u320_set_ecomode(gm12u320);
-       if (gm12u320->pipe_enabled)
-               gm12u320_start_fb_update(gm12u320);
 
-       return 0;
+       return drm_mode_config_helper_resume(dev);
 }
 
 static const struct usb_device_id id_table[] = {
index 9af8ff84974f5ca99a8095c5965640bef46249fb..b4bc358a3269a7363d3471ed86757fa872b8b7ea 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -195,7 +196,6 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
 static struct drm_driver hx8357d_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &hx8357d_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "hx8357d",
@@ -226,18 +226,12 @@ static int hx8357d_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
        if (IS_ERR(dc)) {
index 802fb8dde1b61b98a047de9f2a508aac1e0b23fc..d1a5ab6747d5cd8012d3c1876ffbf3f57aea82cf 100644 (file)
@@ -24,6 +24,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
 
@@ -345,7 +346,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
 static struct drm_driver ili9225_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9225_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .name                   = "ili9225",
        .desc                   = "Ilitek ILI9225",
@@ -376,19 +376,13 @@ static int ili9225_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 33b51dc7faa87359b3b8886ebfa7aa33908a375b..bb819f45a5d3b5d8969aa6df36db567ef8d36be6 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -151,7 +152,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
 static struct drm_driver ili9341_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9341_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "ili9341",
@@ -183,19 +183,13 @@ static int ili9341_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 532560aebb1e0fa2f08ec33e5f500588f054facf..2702ea557d297568cb0521c6ad34329e38d20e7d 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 
@@ -164,7 +165,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
 static struct drm_driver ili9486_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &ili9486_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "ili9486",
@@ -197,19 +197,13 @@ static int ili9486_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index e2cfd9a171436cd6fe752f35ad9744fa3fec9c73..08ac549ab0f7fc3bc4f5b27f5d0b1277b3aadb8d 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modeset_helper.h>
 #include <video/mipi_display.h>
@@ -155,7 +156,6 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
 static struct drm_driver mi0283qt_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &mi0283qt_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "mi0283qt",
@@ -187,19 +187,13 @@ static int mi0283qt_probe(struct spi_device *spi)
        u32 rotation = 0;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index f5ebcaf7ee3a0631e1e6a882bb9e2fb3211d65b0..1c0e7169545b4ae4e8ea7c95e60c15217edb5214 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_format_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_probe_helper.h>
@@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
-static void repaper_release(struct drm_device *drm)
-{
-       struct repaper_epd *epd = drm_to_epd(drm);
-
-       DRM_DEBUG_DRIVER("\n");
-
-       drm_mode_config_cleanup(drm);
-       drm_dev_fini(drm);
-       kfree(epd);
-}
-
 static const uint32_t repaper_formats[] = {
        DRM_FORMAT_XRGB8888,
 };
@@ -956,7 +946,6 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
 static struct drm_driver repaper_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &repaper_fops,
-       .release                = repaper_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .name                   = "repaper",
        .desc                   = "Pervasive Displays RePaper e-ink panels",
@@ -1013,19 +1002,16 @@ static int repaper_probe(struct spi_device *spi)
                }
        }
 
-       epd = kzalloc(sizeof(*epd), GFP_KERNEL);
-       if (!epd)
-               return -ENOMEM;
+       epd = devm_drm_dev_alloc(dev, &repaper_driver,
+                                struct repaper_epd, drm);
+       if (IS_ERR(epd))
+               return PTR_ERR(epd);
 
        drm = &epd->drm;
 
-       ret = devm_drm_dev_init(dev, drm, &repaper_driver);
-       if (ret) {
-               kfree(epd);
+       ret = drmm_mode_config_init(drm);
+       if (ret)
                return ret;
-       }
-
-       drm_mode_config_init(drm);
        drm->mode_config.funcs = &repaper_mode_config_funcs;
 
        epd->spi = spi;
index 9ef559dd3191cfd459ab7f497cc983f1d0548976..2a1fae422f7a210e68fc06da4722d31886d6a115 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_format_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
 
@@ -284,7 +285,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
 static struct drm_driver st7586_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &st7586_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7586",
@@ -317,19 +317,13 @@ static int st7586_probe(struct spi_device *spi)
        size_t bufsize;
        int ret;
 
-       dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-       if (!dbidev)
-               return -ENOMEM;
+       dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
+                                   struct mipi_dbi_dev, drm);
+       if (IS_ERR(dbidev))
+               return PTR_ERR(dbidev);
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &st7586_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
 
index 3cd9b8d9888d639607da4701ae9fbad4f17a34d1..0af1b15efdf8a284f321057b6e22fa5d6fb0fbfb 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mipi_dbi.h>
 
 #define ST7735R_FRMCTR1                0xb1
@@ -156,7 +157,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
 static struct drm_driver st7735r_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &st7735r_fops,
-       .release                = mipi_dbi_release,
        DRM_GEM_CMA_VMAP_DRIVER_OPS,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7735r",
@@ -195,22 +195,16 @@ static int st7735r_probe(struct spi_device *spi)
        if (!cfg)
                cfg = (void *)spi_get_device_id(spi)->driver_data;
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       priv = devm_drm_dev_alloc(dev, &st7735r_driver,
+                                 struct st7735r_priv, dbidev.drm);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        dbidev = &priv->dbidev;
        priv->cfg = cfg;
 
        dbi = &dbidev->dbi;
        drm = &dbidev->drm;
-       ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
-       if (ret) {
-               kfree(dbidev);
-               return ret;
-       }
-
-       drm_mode_config_init(drm);
 
        dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dbi->reset)) {
index 9e07c3f75156ba1d7a48bccea82528eca1c7359b..f73b81c2576e1c05f65a9020a0934a5c8ddd35f5 100644 (file)
@@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref)
                ttm_mem_io_unlock(man);
        }
 
-       if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+       if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+           !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;
@@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref)
        spin_unlock(&ttm_bo_glob.lru_lock);
 
        ttm_bo_cleanup_memtype_use(bo);
+       dma_resv_unlock(bo->base.resv);
 
        BUG_ON(bo->mem.mm_node != NULL);
        atomic_dec(&ttm_bo_glob.bo_count);
index 0afdfb0d1fe1197d78115cff72830c1848842a23..cdc1c42e16695215f28b6f4acec56a0a85e4eea0 100644 (file)
@@ -59,7 +59,7 @@ static int udl_get_modes(struct drm_connector *connector)
 static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
                          struct drm_display_mode *mode)
 {
-       struct udl_device *udl = connector->dev->dev_private;
+       struct udl_device *udl = to_udl(connector->dev);
        if (!udl->sku_pixel_limit)
                return 0;
 
@@ -72,7 +72,7 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       struct udl_device *udl = connector->dev->dev_private;
+       struct udl_device *udl = to_udl(connector->dev);
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
                                        struct udl_drm_connector,
index e6c1cd77d4d4a0d3f803364209c19da309e65b97..d1aa50fd6d65ab2fedcd7765b9f2928f4ec0f600 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
@@ -33,17 +34,8 @@ static int udl_usb_resume(struct usb_interface *interface)
 
 DEFINE_DRM_GEM_FOPS(udl_driver_fops);
 
-static void udl_driver_release(struct drm_device *dev)
-{
-       udl_fini(dev);
-       udl_modeset_cleanup(dev);
-       drm_dev_fini(dev);
-       kfree(dev);
-}
-
 static struct drm_driver driver = {
        .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
-       .release = udl_driver_release,
 
        /* gem hooks */
        .gem_create_object = udl_driver_gem_create_object,
@@ -65,27 +57,19 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface)
        struct udl_device *udl;
        int r;
 
-       udl = kzalloc(sizeof(*udl), GFP_KERNEL);
-       if (!udl)
-               return ERR_PTR(-ENOMEM);
-
-       r = drm_dev_init(&udl->drm, &driver, &interface->dev);
-       if (r) {
-               kfree(udl);
-               return ERR_PTR(r);
-       }
+       udl = devm_drm_dev_alloc(&interface->dev, &driver,
+                                struct udl_device, drm);
+       if (IS_ERR(udl))
+               return udl;
 
        udl->udev = udev;
-       udl->drm.dev_private = udl;
 
        r = udl_init(udl);
-       if (r) {
-               drm_dev_fini(&udl->drm);
-               kfree(udl);
+       if (r)
                return ERR_PTR(r);
-       }
 
        usb_set_intfdata(interface, udl);
+
        return udl;
 }
 
@@ -101,31 +85,22 @@ static int udl_usb_probe(struct usb_interface *interface,
 
        r = drm_dev_register(&udl->drm, 0);
        if (r)
-               goto err_free;
+               return r;
 
        DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
 
-       r = drm_fbdev_generic_setup(&udl->drm, 0);
-       if (r)
-               goto err_drm_dev_unregister;
+       drm_fbdev_generic_setup(&udl->drm, 0);
 
        return 0;
-
-err_drm_dev_unregister:
-       drm_dev_unregister(&udl->drm);
-err_free:
-       drm_dev_put(&udl->drm);
-       return r;
 }
 
 static void udl_usb_disconnect(struct usb_interface *interface)
 {
        struct drm_device *dev = usb_get_intfdata(interface);
 
-       drm_kms_helper_poll_disable(dev);
+       drm_kms_helper_poll_fini(dev);
        udl_drop_usb(dev);
        drm_dev_unplug(dev);
-       drm_dev_put(dev);
 }
 
 /*
index e67227c44cc444eb0bc3a9cfb1479ae4a4111ab8..2642f94a63fc8e80d82c47750b528898b5d183b4 100644 (file)
@@ -68,7 +68,6 @@ struct udl_device {
 
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_cleanup(struct drm_device *dev);
 struct drm_connector *udl_connector_init(struct drm_device *dev);
 
 struct urb *udl_get_urb(struct drm_device *dev);
@@ -77,7 +76,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
 void udl_urb_completion(struct urb *urb);
 
 int udl_init(struct udl_device *udl);
-void udl_fini(struct drm_device *dev);
 
 int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
                     const char *front, char **urb_buf_ptr,
index 538718919916a7c7247009657a74ce90230f0a7a..f5d27f2a5654341ca1d6deb9f066165a5613de62 100644 (file)
@@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev)
        udl_free_urb_list(dev);
        return 0;
 }
-
-void udl_fini(struct drm_device *dev)
-{
-       struct udl_device *udl = to_udl(dev);
-
-       drm_kms_helper_poll_fini(dev);
-
-       if (udl->urbs.count)
-               udl_free_urb_list(dev);
-}
index d59ebac70b150482c3f386d2f4ed54ac2a925f3b..fef43f4e3bac4f0b74f7922d23770c22e349cb01 100644 (file)
@@ -215,7 +215,7 @@ static char *udl_dummy_render(char *wrptr)
 static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct urb *urb;
        char *buf;
        int retval;
@@ -266,8 +266,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
        return 0;
 }
 
-int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
-                     int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+                            int width, int height)
 {
        struct drm_device *dev = fb->dev;
        struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
@@ -369,7 +369,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_framebuffer *fb = plane_state->fb;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct drm_display_mode *mode = &crtc_state->mode;
        char *buf;
        char *wrptr;
@@ -464,11 +464,13 @@ static const struct drm_mode_config_funcs udl_mode_funcs = {
 int udl_modeset_init(struct drm_device *dev)
 {
        size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct drm_connector *connector;
        int ret;
 
-       drm_mode_config_init(dev);
+       ret = drmm_mode_config_init(dev);
+       if (ret)
+               return ret;
 
        dev->mode_config.min_width = 640;
        dev->mode_config.min_height = 480;
@@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev)
        dev->mode_config.funcs = &udl_mode_funcs;
 
        connector = udl_connector_init(dev);
-       if (IS_ERR(connector)) {
-               ret = PTR_ERR(connector);
-               goto err_drm_mode_config_cleanup;
-       }
+       if (IS_ERR(connector))
+               return PTR_ERR(connector);
 
        format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
 
@@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev)
                                           udl_simple_display_pipe_formats,
                                           format_count, NULL, connector);
        if (ret)
-               goto err_drm_mode_config_cleanup;
+               return ret;
 
        drm_mode_config_reset(dev);
 
        return 0;
-
-err_drm_mode_config_cleanup:
-       drm_mode_config_cleanup(dev);
-       return ret;
-}
-
-void udl_modeset_cleanup(struct drm_device *dev)
-{
-       drm_mode_config_cleanup(dev);
 }
index 9e953ce64ef753632a644c0ea8ac87433951ff5b..e76b24bb88285e31c28d7ed4d378891fbf8ef2e8 100644 (file)
@@ -132,7 +132,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
        u32 ident0, ident1, ident2, ident3, cores;
        int ret, core;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -187,8 +187,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
                           (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
        }
 
-       pm_runtime_mark_last_busy(v3d->dev);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_mark_last_busy(v3d->drm.dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
 
        return 0;
 }
@@ -219,7 +219,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
        int measure_ms = 1000;
        int ret;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -245,8 +245,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
                   cycles / (measure_ms * 1000),
                   (cycles / (measure_ms * 100)) % 10);
 
-       pm_runtime_mark_last_busy(v3d->dev);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_mark_last_busy(v3d->drm.dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
 
        return 0;
 }
@@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = {
        {"bo_stats", v3d_debugfs_bo_stats, 0},
 };
 
-int
+void
 v3d_debugfs_init(struct drm_minor *minor)
 {
-       return drm_debugfs_create_files(v3d_debugfs_list,
-                                       ARRAY_SIZE(v3d_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(v3d_debugfs_list,
+                                ARRAY_SIZE(v3d_debugfs_list),
+                                minor->debugfs_root, minor);
 }
index eaa8e9682373e8d40c3a97dd18dea8b4d9baa41b..82a7dfdd14c2a76c73babbc3ed7ef0afda5b99cc 100644 (file)
@@ -25,6 +25,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
 #include <uapi/drm/v3d_drm.h>
 
 #include "v3d_drv.h"
@@ -104,7 +105,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
                if (args->value != 0)
                        return -EINVAL;
 
-               ret = pm_runtime_get_sync(v3d->dev);
+               ret = pm_runtime_get_sync(v3d->drm.dev);
                if (ret < 0)
                        return ret;
                if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
@@ -113,8 +114,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
                } else {
                        args->value = V3D_READ(offset);
                }
-               pm_runtime_mark_last_busy(v3d->dev);
-               pm_runtime_put_autosuspend(v3d->dev);
+               pm_runtime_mark_last_busy(v3d->drm.dev);
+               pm_runtime_put_autosuspend(v3d->drm.dev);
                return 0;
        }
 
@@ -234,9 +235,9 @@ static int
 map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
 {
        struct resource *res =
-               platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+               platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
 
-       *regs = devm_ioremap_resource(v3d->dev, res);
+       *regs = devm_ioremap_resource(v3d->drm.dev, res);
        return PTR_ERR_OR_ZERO(*regs);
 }
 
@@ -250,20 +251,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
        u32 ident1;
 
 
-       v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
-       if (!v3d)
-               return -ENOMEM;
-       v3d->dev = dev;
-       v3d->pdev = pdev;
+       v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+       if (IS_ERR(v3d))
+               return PTR_ERR(v3d);
+
        drm = &v3d->drm;
 
+       platform_set_drvdata(pdev, drm);
+
        ret = map_regs(v3d, &v3d->hub_regs, "hub");
        if (ret)
-               goto dev_free;
+               return ret;
 
        ret = map_regs(v3d, &v3d->core_regs[0], "core0");
        if (ret)
-               goto dev_free;
+               return ret;
 
        mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
        dev->coherent_dma_mask =
@@ -281,45 +283,37 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
                ret = PTR_ERR(v3d->reset);
 
                if (ret == -EPROBE_DEFER)
-                       goto dev_free;
+                       return ret;
 
                v3d->reset = NULL;
                ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
                if (ret) {
                        dev_err(dev,
                                "Failed to get reset control or bridge regs\n");
-                       goto dev_free;
+                       return ret;
                }
        }
 
        if (v3d->ver < 41) {
                ret = map_regs(v3d, &v3d->gca_regs, "gca");
                if (ret)
-                       goto dev_free;
+                       return ret;
        }
 
        v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
                                        GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
        if (!v3d->mmu_scratch) {
                dev_err(dev, "Failed to allocate MMU scratch page\n");
-               ret = -ENOMEM;
-               goto dev_free;
+               return -ENOMEM;
        }
 
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
        pm_runtime_enable(dev);
 
-       ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
-       if (ret)
-               goto dma_free;
-
-       platform_set_drvdata(pdev, drm);
-       drm->dev_private = v3d;
-
        ret = v3d_gem_init(drm);
        if (ret)
-               goto dev_destroy;
+               goto dma_free;
 
        ret = v3d_irq_init(v3d);
        if (ret)
@@ -335,12 +329,8 @@ irq_disable:
        v3d_irq_disable(v3d);
 gem_destroy:
        v3d_gem_destroy(drm);
-dev_destroy:
-       drm_dev_put(drm);
 dma_free:
        dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
-       kfree(v3d);
        return ret;
 }
 
@@ -353,9 +343,8 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
 
        v3d_gem_destroy(drm);
 
-       drm_dev_put(drm);
-
-       dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+       dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+                   v3d->mmu_scratch_paddr);
 
        return 0;
 }
index ac260333458738379b057618db5f4b11542360f2..8a390738d65baf20189d799d1481d4cd21f636f0 100644 (file)
@@ -14,7 +14,6 @@
 #include "uapi/drm/v3d_drm.h"
 
 struct clk;
-struct device;
 struct platform_device;
 struct reset_control;
 
@@ -47,8 +46,6 @@ struct v3d_dev {
        int ver;
        bool single_irq_line;
 
-       struct device *dev;
-       struct platform_device *pdev;
        void __iomem *hub_regs;
        void __iomem *core_regs[3];
        void __iomem *bridge_regs;
@@ -121,7 +118,7 @@ struct v3d_dev {
 static inline struct v3d_dev *
 to_v3d_dev(struct drm_device *dev)
 {
-       return (struct v3d_dev *)dev->dev_private;
+       return container_of(dev, struct v3d_dev, drm);
 }
 
 static inline bool
@@ -130,6 +127,8 @@ v3d_has_csd(struct v3d_dev *v3d)
        return v3d->ver >= 41;
 }
 
+#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
+
 /* The per-fd struct, which tracks the MMU mappings. */
 struct v3d_file_priv {
        struct v3d_dev *v3d;
@@ -316,7 +315,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
                                                 struct sg_table *sgt);
 
 /* v3d_debugfs.c */
-int v3d_debugfs_init(struct drm_minor *minor);
+void v3d_debugfs_init(struct drm_minor *minor);
 
 /* v3d_fence.c */
 extern const struct dma_fence_ops v3d_fence_ops;
index 549dde83408b0ffed1b0204702978b351e17917a..09a7639cf161f59fcfe34d23ebea036838801315 100644 (file)
@@ -370,8 +370,8 @@ v3d_job_free(struct kref *ref)
        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);
 
-       pm_runtime_mark_last_busy(job->v3d->dev);
-       pm_runtime_put_autosuspend(job->v3d->dev);
+       pm_runtime_mark_last_busy(job->v3d->drm.dev);
+       pm_runtime_put_autosuspend(job->v3d->drm.dev);
 
        kfree(job);
 }
@@ -439,7 +439,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
        job->v3d = v3d;
        job->free = free;
 
-       ret = pm_runtime_get_sync(v3d->dev);
+       ret = pm_runtime_get_sync(v3d->drm.dev);
        if (ret < 0)
                return ret;
 
@@ -458,7 +458,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
        return 0;
 fail:
        xa_destroy(&job->deps);
-       pm_runtime_put_autosuspend(v3d->dev);
+       pm_runtime_put_autosuspend(v3d->drm.dev);
        return ret;
 }
 
@@ -886,12 +886,12 @@ v3d_gem_init(struct drm_device *dev)
         */
        drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
 
-       v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+       v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
                               &v3d->pt_paddr,
                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
        if (!v3d->pt) {
                drm_mm_takedown(&v3d->mm);
-               dev_err(v3d->dev,
+               dev_err(v3d->drm.dev,
                        "Failed to allocate page tables. "
                        "Please ensure you have CMA enabled.\n");
                return -ENOMEM;
@@ -903,7 +903,7 @@ v3d_gem_init(struct drm_device *dev)
        ret = v3d_sched_init(v3d);
        if (ret) {
                drm_mm_takedown(&v3d->mm);
-               dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+               dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
                                  v3d->pt_paddr);
        }
 
@@ -925,5 +925,6 @@ v3d_gem_destroy(struct drm_device *dev)
 
        drm_mm_takedown(&v3d->mm);
 
-       dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+       dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+                         v3d->pt_paddr);
 }
index 662e67279a7bb40554a02e68588e6e538556c8af..51b65263c6571a2b088230e94c989482bd88cfcb 100644 (file)
@@ -128,7 +128,7 @@ v3d_irq(int irq, void *arg)
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
-               dev_err(v3d->dev, "GMP violation\n");
+               dev_err(v3d->drm.dev, "GMP violation\n");
 
        /* V3D 4.2 wires the hub and core IRQs together, so if we &
         * didn't see the common one then check hub for MMU IRQs.
@@ -189,7 +189,7 @@ v3d_hub_irq(int irq, void *arg)
                                client = v3d41_axi_ids[axi_id];
                }
 
-               dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+               dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
                        client, axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
@@ -217,16 +217,17 @@ v3d_irq_init(struct v3d_dev *v3d)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
 
-       irq1 = platform_get_irq(v3d->pdev, 1);
+       irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
        if (irq1 == -EPROBE_DEFER)
                return irq1;
        if (irq1 > 0) {
-               ret = devm_request_irq(v3d->dev, irq1,
+               ret = devm_request_irq(v3d->drm.dev, irq1,
                                       v3d_irq, IRQF_SHARED,
                                       "v3d_core0", v3d);
                if (ret)
                        goto fail;
-               ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+               ret = devm_request_irq(v3d->drm.dev,
+                                      platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_hub_irq, IRQF_SHARED,
                                       "v3d_hub", v3d);
                if (ret)
@@ -234,7 +235,8 @@ v3d_irq_init(struct v3d_dev *v3d)
        } else {
                v3d->single_irq_line = true;
 
-               ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+               ret = devm_request_irq(v3d->drm.dev,
+                                      platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_irq, IRQF_SHARED,
                                       "v3d", v3d);
                if (ret)
@@ -246,7 +248,7 @@ v3d_irq_init(struct v3d_dev *v3d)
 
 fail:
        if (ret != -EPROBE_DEFER)
-               dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+               dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
        return ret;
 }
 
index 395e81d97163211196dc8d9a5ef7817e8999a9fd..3b81ea28c0bbc8d1a13bde4e025c47be296c176f 100644 (file)
@@ -40,7 +40,7 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret)
-               dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+               dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
 
        V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
                  V3D_MMU_CTL_TLB_CLEAR);
@@ -52,14 +52,14 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
        ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
                         V3D_MMU_CTL_TLB_CLEARING), 100);
        if (ret) {
-               dev_err(v3d->dev, "TLB clear wait idle failed\n");
+               dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
                return ret;
        }
 
        ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
                         V3D_MMUC_CONTROL_FLUSHING), 100);
        if (ret)
-               dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+               dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
 
        return ret;
 }
@@ -109,7 +109,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
                     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
 
        if (v3d_mmu_flush_all(v3d))
-               dev_err(v3d->dev, "MMU flush timeout\n");
+               dev_err(v3d->drm.dev, "MMU flush timeout\n");
 }
 
 void v3d_mmu_remove_ptes(struct v3d_bo *bo)
@@ -122,5 +122,5 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo)
                v3d->pt[page] = 0;
 
        if (v3d_mmu_flush_all(v3d))
-               dev_err(v3d->dev, "MMU flush timeout\n");
+               dev_err(v3d->drm.dev, "MMU flush timeout\n");
 }
index 8c2df6d95283e46598d18c66e75c14c80bd3ee5c..0747614a78f0bc4bed7c1f3734c14655a0346415 100644 (file)
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_bin");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+               dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
                return ret;
        }
 
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_render");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+               dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
                        ret);
                v3d_sched_fini(v3d);
                return ret;
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             msecs_to_jiffies(hang_limit_ms),
                             "v3d_tfu");
        if (ret) {
-               dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+               dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
                        ret);
                v3d_sched_fini(v3d);
                return ret;
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     msecs_to_jiffies(hang_limit_ms),
                                     "v3d_csd");
                if (ret) {
-                       dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+                       dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
                                ret);
                        v3d_sched_fini(v3d);
                        return ret;
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     msecs_to_jiffies(hang_limit_ms),
                                     "v3d_cache_clean");
                if (ret) {
-                       dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+                       dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
                                ret);
                        v3d_sched_fini(v3d);
                        return ret;
index ac8f75db2ecd8ae5aa892946898815d66bafb423..cf2e3e6a23881a55157ba399ec788438a3652fd4 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 
 #include "vbox_drv.h"
 
@@ -45,28 +46,22 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                return ret;
 
-       vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
-       if (!vbox)
-               return -ENOMEM;
-
-       ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
-       if (ret) {
-               kfree(vbox);
-               return ret;
-       }
+       vbox = devm_drm_dev_alloc(&pdev->dev, &driver,
+                                 struct vbox_private, ddev);
+       if (IS_ERR(vbox))
+               return PTR_ERR(vbox);
 
        vbox->ddev.pdev = pdev;
-       vbox->ddev.dev_private = vbox;
        pci_set_drvdata(pdev, vbox);
        mutex_init(&vbox->hw_mutex);
 
-       ret = pci_enable_device(pdev);
+       ret = pcim_enable_device(pdev);
        if (ret)
-               goto err_dev_put;
+               return ret;
 
        ret = vbox_hw_init(vbox);
        if (ret)
-               goto err_pci_disable;
+               return ret;
 
        ret = vbox_mm_init(vbox);
        if (ret)
@@ -80,14 +75,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto err_mode_fini;
 
-       ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
-       if (ret)
-               goto err_irq_fini;
-
        ret = drm_dev_register(&vbox->ddev, 0);
        if (ret)
                goto err_irq_fini;
 
+       drm_fbdev_generic_setup(&vbox->ddev, 32);
+
        return 0;
 
 err_irq_fini:
@@ -98,10 +91,6 @@ err_mm_fini:
        vbox_mm_fini(vbox);
 err_hw_fini:
        vbox_hw_fini(vbox);
-err_pci_disable:
-       pci_disable_device(pdev);
-err_dev_put:
-       drm_dev_put(&vbox->ddev);
        return ret;
 }
 
@@ -114,7 +103,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
        vbox_mode_fini(vbox);
        vbox_mm_fini(vbox);
        vbox_hw_fini(vbox);
-       drm_dev_put(&vbox->ddev);
 }
 
 #ifdef CONFIG_PM_SLEEP
index 87421903816c04fc123c3a280163d5a8b2c451ba..ac7c2effc46f4391667548136d259056f0b1c9e4 100644 (file)
@@ -127,6 +127,7 @@ struct vbox_encoder {
 #define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
 #define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
 #define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_dev(x) container_of(x, struct vbox_private, ddev)
 
 bool vbox_check_supported(u16 id);
 int vbox_hw_init(struct vbox_private *vbox);
index 16a1e29f5292ce4d1e745749c3bc56b8d678062f..631657fa554f9a1a5b6104064697f7eec8b6c8fa 100644 (file)
@@ -34,7 +34,7 @@ void vbox_report_hotplug(struct vbox_private *vbox)
 irqreturn_t vbox_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *)arg;
-       struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(dev);
        u32 host_flags = vbox_get_flags(vbox);
 
        if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
index 9dcab115a26110fc541f9e96ef985833f878ab86..d68d9bad76747015c446b3f5908014ca81dd3b70 100644 (file)
@@ -71,8 +71,6 @@ static void vbox_accel_fini(struct vbox_private *vbox)
 
        for (i = 0; i < vbox->num_crtcs; ++i)
                vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
-
-       pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
 }
 
 /* Do we support the 4.3 plus mode hint reporting interface? */
@@ -123,21 +121,22 @@ int vbox_hw_init(struct vbox_private *vbox)
                return -ENOMEM;
 
        /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
-       vbox->guest_pool = gen_pool_create(4, -1);
+       vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
+                                               "vboxvideo-accel");
        if (!vbox->guest_pool)
-               goto err_unmap_guest_heap;
+               return -ENOMEM;
 
        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
-               goto err_destroy_guest_pool;
+               return ret;
 
        ret = hgsmi_test_query_conf(vbox->guest_pool);
        if (ret) {
                DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
-               goto err_destroy_guest_pool;
+               return ret;
        }
 
        /* Reduce available VRAM size to reflect the guest heap. */
@@ -149,33 +148,23 @@ int vbox_hw_init(struct vbox_private *vbox)
 
        if (!have_hgsmi_mode_hints(vbox)) {
                ret = -ENOTSUPP;
-               goto err_destroy_guest_pool;
+               return ret;
        }
 
        vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
                                             sizeof(struct vbva_modehint),
                                             GFP_KERNEL);
-       if (!vbox->last_mode_hints) {
-               ret = -ENOMEM;
-               goto err_destroy_guest_pool;
-       }
+       if (!vbox->last_mode_hints)
+               return -ENOMEM;
 
        ret = vbox_accel_init(vbox);
        if (ret)
-               goto err_destroy_guest_pool;
+               return ret;
 
        return 0;
-
-err_destroy_guest_pool:
-       gen_pool_destroy(vbox->guest_pool);
-err_unmap_guest_heap:
-       pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
-       return ret;
 }
 
 void vbox_hw_fini(struct vbox_private *vbox)
 {
        vbox_accel_fini(vbox);
-       gen_pool_destroy(vbox->guest_pool);
-       pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
 }
index 0883a435e62b9c9e2dee84fe30145f72e73f0086..d9a5af62af890abc2cbb860da6aabb17109af54e 100644 (file)
@@ -36,7 +36,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
        u16 flags;
        s32 x_offset, y_offset;
 
-       vbox = crtc->dev->dev_private;
+       vbox = to_vbox_dev(crtc->dev);
        width = vbox_crtc->width ? vbox_crtc->width : 640;
        height = vbox_crtc->height ? vbox_crtc->height : 480;
        bpp = fb ? fb->format->cpp[0] * 8 : 32;
@@ -77,7 +77,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
 static int vbox_set_view(struct drm_crtc *crtc)
 {
        struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
-       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(crtc->dev);
        struct vbva_infoview *p;
 
        /*
@@ -174,7 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
                                        int x, int y)
 {
        struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
-       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(crtc->dev);
        struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
        bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
 
@@ -272,7 +272,7 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
 {
        struct drm_crtc *crtc = plane->state->crtc;
        struct drm_framebuffer *fb = plane->state->fb;
-       struct vbox_private *vbox = fb->dev->dev_private;
+       struct vbox_private *vbox = to_vbox_dev(fb->dev);
        struct drm_mode_rect *clips;
        uint32_t num_clips, i;
 
@@ -704,7 +704,7 @@ static int vbox_get_modes(struct drm_connector *connector)
        int preferred_width, preferred_height;
 
        vbox_connector = to_vbox_connector(connector);
-       vbox = connector->dev->dev_private;
+       vbox = to_vbox_dev(connector->dev);
 
        hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
                                    HOST_FLAGS_OFFSET);
index 976423d0c3ccacc588fcf1e6a2a43e7b679b8736..f5a06675da43c67abcf187f7bddc9b95f88e5f3a 100644 (file)
@@ -24,25 +24,13 @@ int vbox_mm_init(struct vbox_private *vbox)
                return ret;
        }
 
-#ifdef DRM_MTRR_WC
-       vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-                                    pci_resource_len(dev->pdev, 0),
-                                    DRM_MTRR_WC);
-#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
-#endif
        return 0;
 }
 
 void vbox_mm_fini(struct vbox_private *vbox)
 {
-#ifdef DRM_MTRR_WC
-       drm_mtrr_del(vbox->fb_mtrr,
-                    pci_resource_start(vbox->ddev.pdev, 0),
-                    pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
-#else
        arch_phys_wc_del(vbox->fb_mtrr);
-#endif
        drm_vram_helper_release_mm(&vbox->ddev);
 }
index b61b2d3407b516c2c819584652a9051ffa5cb53c..4fbbf980a299fd3255464d1c9cd88b17580d6fcc 100644 (file)
@@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry {
  * Called at drm_dev_register() time on each of the minors registered
  * by the DRM device, to attach the debugfs files.
  */
-int
+void
 vc4_debugfs_init(struct drm_minor *minor)
 {
        struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
@@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor)
                            minor->debugfs_root, &vc4->load_tracker_enabled);
 
        list_for_each_entry(entry, &vc4->debugfs_list, link) {
-               int ret = drm_debugfs_create_files(&entry->info, 1,
-                                                  minor->debugfs_root, minor);
-
-               if (ret)
-                       return ret;
+               drm_debugfs_create_files(&entry->info, 1,
+                                        minor->debugfs_root, minor);
        }
-
-       return 0;
 }
 
 static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
index 6dfede03396efb1906a529d97d0bc2174c69b654..a90f2545baee0c7e6b541cc1d957c7dad6c286a4 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
@@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = {
        VC4_REG32(DPI_ID),
 };
 
-static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
 {
        struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
@@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
        if (ret)
                DRM_ERROR("Failed to turn on core clock: %d\n", ret);
 
-       drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
-                        DRM_MODE_ENCODER_DPI, NULL);
+       drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
        drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
 
        ret = vc4_dpi_init_bridge(dpi);
index 139d25a8328e8e60927e687e36821484da64c100..3b1f02efefbe208b99622692137346b8abb900ce 100644 (file)
@@ -759,7 +759,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
                          unsigned int *top, unsigned int *bottom);
 
 /* vc4_debugfs.c */
-int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_init(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
 void vc4_debugfs_add_file(struct drm_device *drm,
                          const char *filename,
index d99b1d52665172abf055042034df49956101c388..eaf276978ee7fb79a145868071ddd91f86fbdd12 100644 (file)
@@ -37,6 +37,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "vc4_drv.h"
 #include "vc4_regs.h"
@@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = {
        VC4_REG32(DSI1_ID),
 };
 
-static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
-       .destroy = vc4_dsi_encoder_destroy,
-};
-
 static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
 {
        u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
@@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
        if (dsi->port == 1)
                vc4->dsi1 = dsi;
 
-       drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
-                        DRM_MODE_ENCODER_DSI, NULL);
+       drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
        drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
 
        ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
@@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
         * normally.
         */
        list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
-       vc4_dsi_encoder_destroy(dsi->encoder);
+       drm_encoder_cleanup(dsi->encoder);
 
        if (dsi->port == 1)
                vc4->dsi1 = NULL;
index 340719238753d2becd1d01be0d909a8644392f46..625bfcf52dc4daabeda6dc0acf0313e30bfff4cd 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/i2c.h>
@@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
        return connector;
 }
 
-static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
-       .destroy = vc4_hdmi_encoder_destroy,
-};
-
 static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type)
 {
@@ -1406,8 +1398,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
        }
        pm_runtime_enable(dev);
 
-       drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
 
        hdmi->connector =
@@ -1465,7 +1456,7 @@ err_destroy_conn:
        vc4_hdmi_connector_destroy(hdmi->connector);
 #endif
 err_destroy_encoder:
-       vc4_hdmi_encoder_destroy(hdmi->encoder);
+       drm_encoder_cleanup(hdmi->encoder);
 err_unprepare_hsm:
        clk_disable_unprepare(hdmi->hsm_clock);
        pm_runtime_disable(dev);
@@ -1484,7 +1475,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
 
        cec_unregister_adapter(hdmi->cec_adap);
        vc4_hdmi_connector_destroy(hdmi->connector);
-       vc4_hdmi_encoder_destroy(hdmi->encoder);
+       drm_encoder_cleanup(hdmi->encoder);
 
        clk_disable_unprepare(hdmi->hsm_clock);
        pm_runtime_disable(dev);
index 7402bc768664ccd47d66e5168769784d52fd20b8..bd5b8eb58b180696b217f914d550af1419a2978d 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/of_graph.h>
@@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
        return connector;
 }
 
-static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
 {
        struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
@@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
 
        pm_runtime_enable(dev);
 
-       drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
-                        DRM_MODE_ENCODER_TVDAC, NULL);
+       drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
        drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
 
        vec->connector = vc4_vec_connector_init(drm, vec);
index 909eba43664a28f857070ec7090482f53727416d..ec1a8ebb6f1bfcad766f1a0effb2c305f9e11388 100644 (file)
@@ -39,6 +39,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_prime.h>
 
 #include "vgem_drv.h"
@@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev)
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
 
        platform_device_unregister(vgem->platform);
-       drm_dev_fini(&vgem->drm);
-
-       kfree(vgem);
 }
 
 static struct drm_driver vgem_driver = {
@@ -489,16 +487,19 @@ static int __init vgem_init(void)
                           &vgem_device->platform->dev);
        if (ret)
                goto out_unregister;
+       drmm_add_final_kfree(&vgem_device->drm, vgem_device);
 
        /* Final step: expose the device/driver to userspace */
-       ret  = drm_dev_register(&vgem_device->drm, 0);
+       ret = drm_dev_register(&vgem_device->drm, 0);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        return 0;
 
-out_fini:
-       drm_dev_fini(&vgem_device->drm);
+out_put:
+       drm_dev_put(&vgem_device->drm);
+       return ret;
+
 out_unregister:
        platform_device_unregister(vgem_device->platform);
 out_free:
index e27120d512b0614031a98eb3dc9ff03523ead7ee..3221520f61f0cc2ecb3f8030532e64ce2564ff04 100644 (file)
@@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = {
 
 #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
 
-int
+void
 virtio_gpu_debugfs_init(struct drm_minor *minor)
 {
        drm_debugfs_create_files(virtio_gpu_debugfs_list,
                                 VIRTIO_GPU_DEBUGFS_ENTRIES,
                                 minor->debugfs_root, minor);
-       return 0;
 }
index 2b7e6ae65546adaddf6ea55bb8b53c97dd580261..cc7fd957a307290293e4d32cb5076b6af9ba5bca 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "virtgpu_drv.h"
 
@@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 {
        struct drm_device *dev = vgdev->ddev;
@@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        if (vgdev->has_edid)
                drm_connector_attach_edid_property(connector);
 
-       drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
-                        DRM_MODE_ENCODER_VIRTUAL, NULL);
+       drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
        drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
        encoder->possible_crtcs = 1 << index;
 
index 7879ff58236f1472afb38d55de3ec1782e41fd7b..9ff9f4ac0522ac66b6526175fd40bb75521d44da 100644 (file)
@@ -218,27 +218,19 @@ struct virtio_gpu_fpriv {
        struct mutex context_lock;
 };
 
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
-/* virtio_kms.c */
+/* virtgpu_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
 void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
-                         struct drm_device *dev,
-                         struct virtio_gpu_object_params *params,
-                         struct drm_gem_object **obj_p,
-                         uint32_t *handle_p);
+/* virtgpu_gem.c */
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
                               struct drm_file *file);
 void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
@@ -264,7 +256,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_object_array *objs);
 void virtio_gpu_array_put_free_work(struct work_struct *work);
 
-/* virtio vg */
+/* virtgpu_vq.c */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -288,10 +280,10 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-                            struct virtio_gpu_object *obj,
-                            struct virtio_gpu_mem_entry *ents,
-                            unsigned int nents);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+                             struct virtio_gpu_object *obj,
+                             struct virtio_gpu_mem_entry *ents,
+                             unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -344,17 +336,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
-/* virtio_gpu_display.c */
+/* virtgpu_display.c */
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
                                        enum drm_plane_type type,
                                        int index);
 
-/* virtio_gpu_fence.c */
+/* virtgpu_fence.c */
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
        struct virtio_gpu_device *vgdev);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -363,7 +355,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
                                    u64 last_seq);
 
-/* virtio_gpu_object */
+/* virtgpu_object.c */
 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size);
@@ -379,7 +371,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
        struct drm_device *dev, struct dma_buf_attachment *attach,
        struct sg_table *sgt);
 
-/* virgl debugfs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
 
 #endif
index f0d5a897467752aedbc1ba0ee8dbae6380da3bd2..d6cb350ae52a99289fb73a800fdfbb6ef5c44766 100644 (file)
 
 #include "virtgpu_drv.h"
 
-int virtio_gpu_gem_create(struct drm_file *file,
-                         struct drm_device *dev,
-                         struct virtio_gpu_object_params *params,
-                         struct drm_gem_object **obj_p,
-                         uint32_t *handle_p)
+static int virtio_gpu_gem_create(struct drm_file *file,
+                                struct drm_device *dev,
+                                struct virtio_gpu_object_params *params,
+                                struct drm_gem_object **obj_p,
+                                uint32_t *handle_p)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_object *obj;
@@ -117,7 +117,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
        struct virtio_gpu_object_array *objs;
 
        if (!vgdev->has_virgl_3d)
-               return 0;
+               goto out_notify;
 
        objs = virtio_gpu_array_alloc(1);
        if (!objs)
@@ -126,6 +126,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 
        virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
                                               objs);
+out_notify:
        virtio_gpu_notify(vgdev);
        return 0;
 }
index 512daff920387e5103e37743f2f441641a762dd9..5df722072ba0b95998719f841a4d00ca7ae5debc 100644 (file)
@@ -47,7 +47,6 @@ void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
        get_task_comm(dbgname, current);
        virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                                      strlen(dbgname), dbgname);
-       virtio_gpu_notify(vgdev);
        vfpriv->context_created = true;
 
 out_unlock:
index d9039bb7c5e3759d71e21373b127580d70554bf1..6ccbd01cd888c3daedaad691c83711530e53f2e3 100644 (file)
@@ -235,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                return ret;
        }
 
-       ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
-       if (ret != 0) {
-               virtio_gpu_free_object(&shmem_obj->base);
-               return ret;
-       }
+       virtio_gpu_object_attach(vgdev, bo, ents, nents);
 
-       virtio_gpu_notify(vgdev);
        *bo_ptr = bo;
        return 0;
 
index 73854915ec349b6b405cebd2813c2fd63c85711a..9e663a5d9952684f154730b83ed1680b24f2d50b 100644 (file)
@@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-                            struct virtio_gpu_object *obj,
-                            struct virtio_gpu_mem_entry *ents,
-                            unsigned int nents)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+                             struct virtio_gpu_object *obj,
+                             struct virtio_gpu_mem_entry *ents,
+                             unsigned int nents)
 {
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
-       return 0;
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
index 860de052e8209369e2680a8aac02135157b1d75b..1e8b2169d834123810e74d3a628118324d28e2bf 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -34,7 +35,7 @@
 
 static struct vkms_device *vkms_device;
 
-bool enable_cursor;
+bool enable_cursor = true;
 module_param_named(enable_cursor, enable_cursor, bool, 0444);
 MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
 
@@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev)
        platform_device_unregister(vkms->platform);
        drm_atomic_helper_shutdown(&vkms->drm);
        drm_mode_config_cleanup(&vkms->drm);
-       drm_dev_fini(&vkms->drm);
        destroy_workqueue(vkms->output.composer_workq);
 }
 
@@ -158,13 +158,14 @@ static int __init vkms_init(void)
                           &vkms_device->platform->dev);
        if (ret)
                goto out_unregister;
+       drmm_add_final_kfree(&vkms_device->drm, vkms_device);
 
        ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
                                           DMA_BIT_MASK(64));
 
        if (ret) {
                DRM_ERROR("Could not initialize DMA support\n");
-               goto out_fini;
+               goto out_put;
        }
 
        vkms_device->drm.irq_enabled = true;
@@ -172,25 +173,25 @@ static int __init vkms_init(void)
        ret = drm_vblank_init(&vkms_device->drm, 1);
        if (ret) {
                DRM_ERROR("Failed to vblank\n");
-               goto out_fini;
+               goto out_put;
        }
 
        ret = vkms_modeset_init(vkms_device);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        ret = drm_dev_register(&vkms_device->drm, 0);
        if (ret)
-               goto out_fini;
+               goto out_put;
 
        return 0;
 
-out_fini:
-       drm_dev_fini(&vkms_device->drm);
+out_put:
+       drm_dev_put(&vkms_device->drm);
+       return ret;
 
 out_unregister:
        platform_device_unregister(vkms_device->platform);
-
 out_free:
        kfree(vkms_device);
        return ret;
@@ -205,8 +206,6 @@ static void __exit vkms_exit(void)
 
        drm_dev_unregister(&vkms_device->drm);
        drm_dev_put(&vkms_device->drm);
-
-       kfree(vkms_device);
 }
 
 module_init(vkms_init);
index eda04ffba7b1f15ad3321c607fac524092a9c92d..f4036bb0b9a89a51c05acdb1df2c0041af47a4ff 100644 (file)
@@ -117,11 +117,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
                                  enum drm_plane_type type, int index);
 
 /* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
-                                      struct drm_file *file,
-                                      u32 *handle,
-                                      u64 size);
-
 vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
 
 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
index 2e01186fb943b897b7b40dd29176465d2b85a1e7..c541fec575665b3414a437911a316457a916c6e1 100644 (file)
@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
        return ret;
 }
 
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
-                                      struct drm_file *file,
-                                      u32 *handle,
-                                      u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+                                             struct drm_file *file,
+                                             u32 *handle,
+                                             u64 size)
 {
        struct vkms_gem_object *obj;
        int ret;
@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                return ERR_CAST(obj);
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
-       drm_gem_object_put_unlocked(&obj->gem);
        if (ret)
                return ERR_PTR(ret);
 
@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
        args->size = gem_obj->size;
        args->pitch = pitch;
 
+       drm_gem_object_put_unlocked(gem_obj);
+
        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
 
        return 0;
index fb1941a6522cfea38d8d58b419e66e1e57af5082..85afb77e97f0e6a6d9bfd8ee985c29e1b1b845eb 100644 (file)
@@ -3,6 +3,7 @@
 #include "vkms_drv.h"
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 static void vkms_connector_destroy(struct drm_connector *connector)
 {
@@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int vkms_conn_get_modes(struct drm_connector *connector)
 {
        int count;
@@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
 
        drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
 
-       ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
-                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to init encoder\n");
                goto err_encoder;
index 374142018171c98c62a4a78aad5890d6707401cf..1fd458e877caa38e9aaed8c18a64bb95ec7894ac 100644 (file)
@@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev)
        drm_atomic_helper_shutdown(dev);
        drm_mode_config_cleanup(dev);
 
-       drm_dev_fini(dev);
-       kfree(dev);
-
        if (front_info->cfg.be_alloc)
                xenbus_switch_state(front_info->xb_dev,
                                    XenbusStateInitialising);
@@ -561,6 +558,7 @@ fail_register:
 fail_modeset:
        drm_kms_helper_poll_fini(drm_dev);
        drm_mode_config_cleanup(drm_dev);
+       drm_dev_put(drm_dev);
 fail:
        kfree(drm_info);
        return ret;
index b98a1420dcd38a97b1e893d9d716f190c6f4c734..76a16d997a23b221b8b57af29a316bfcd840e6a1 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include <sound/hdmi-codec.h>
 
@@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
        .mode_set = zx_hdmi_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_hdmi *hdmi = to_zx_hdmi(connector);
@@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
 
        encoder->possible_crtcs = VOU_CRTC_MASK;
 
-       drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
 
        hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
index c598b7daf1f18f1d4d4d7f7e576070b0e479cc82..d8a89ba383bc870030bca61886fdcd4be1db2353 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "zx_drm_drv.h"
 #include "zx_tvenc_regs.h"
@@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
        .mode_set = zx_tvenc_encoder_mode_set,
 };
 
-static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_tvenc *tvenc = to_zx_tvenc(connector);
@@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
         */
        encoder->possible_crtcs = BIT(1);
 
-       drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
-                        DRM_MODE_ENCODER_TVDAC, NULL);
+       drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
        drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
 
        connector->interlace_allowed = true;
index c4fa3bbaba7808574ebcb902bea5daf5884c810d..a7ed7f5ca8370ea3819770beea1ffad5bdb7e566 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "zx_drm_drv.h"
 #include "zx_vga_regs.h"
@@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
        .disable = zx_vga_encoder_disable,
 };
 
-static const struct drm_encoder_funcs zx_vga_encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
 static int zx_vga_connector_get_modes(struct drm_connector *connector)
 {
        struct zx_vga *vga = to_zx_vga(connector);
@@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
 
        encoder->possible_crtcs = VOU_CRTC_MASK;
 
-       ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs,
-                              DRM_MODE_ENCODER_DAC, NULL);
+       ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
                return ret;
index 91b0a719d221722a3932f828edcdd70ab9896a24..fa88e8b9a83dfb6bcdf15f7da8f932e0ef83308f 100644 (file)
@@ -472,7 +472,7 @@ config FB_OF
 
 config FB_CONTROL
        bool "Apple \"control\" display support"
-       depends on (FB = y) && PPC_PMAC && PPC32
+       depends on (FB = y) && ((PPC_PMAC && PPC32) || COMPILE_TEST)
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 314ab82e01c027503856d1e6fde3f1f94a6ae276..6f7838979f0a9463840d560512fab732bdd2cf22 100644 (file)
@@ -544,10 +544,6 @@ static int arcfb_probe(struct platform_device *dev)
        par->cslut[1] = 0x06;
        info->flags = FBINFO_FLAG_DEFAULT;
        spin_lock_init(&par->lock);
-       retval = register_framebuffer(info);
-       if (retval < 0)
-               goto err1;
-       platform_set_drvdata(dev, info);
        if (irq) {
                par->irq = irq;
                if (request_irq(par->irq, &arcfb_interrupt, IRQF_SHARED,
@@ -558,6 +554,10 @@ static int arcfb_probe(struct platform_device *dev)
                        goto err1;
                }
        }
+       retval = register_framebuffer(info);
+       if (retval < 0)
+               goto err1;
+       platform_set_drvdata(dev, info);
        fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
                videomemorysize >> 10);
 
@@ -593,6 +593,8 @@ static int arcfb_remove(struct platform_device *dev)
 
        if (info) {
                unregister_framebuffer(info);
+               if (irq)
+                       free_irq(((struct arcfb_par *)(info->par))->irq, info);
                vfree((void __force *)info->screen_base);
                framebuffer_release(info);
        }
index d567f5d56c13e316d087fa6cc99c25cc09ee98ad..1e252192569a8be75d81ec7496f72a4ecde18cd2 100644 (file)
@@ -1114,7 +1114,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 
        sinfo->irq_base = platform_get_irq(pdev, 0);
        if (sinfo->irq_base < 0) {
-               dev_err(dev, "unable to get irq\n");
                ret = sinfo->irq_base;
                goto stop_clk;
        }
index d7e41c8dd533c785fa0a55dfaf11faeb88adaebf..d05d4195acade79cef91e9e2a900beab6ab60676 100644 (file)
@@ -334,20 +334,6 @@ static const struct aty128_meminfo sdr_128 = {
        .name = "128-bit SDR SGRAM (1:1)",
 };
 
-static const struct aty128_meminfo sdr_64 = {
-       .ML = 4,
-       .MB = 8,
-       .Trcd = 3,
-       .Trp = 3,
-       .Twr = 1,
-       .CL = 3,
-       .Tr2w = 1,
-       .LoopLatency = 17,
-       .DspOn = 46,
-       .Rloop = 17,
-       .name = "64-bit SDR SGRAM (1:1)",
-};
-
 static const struct aty128_meminfo sdr_sgram = {
        .ML = 4,
        .MB = 4,
index 175d2598f28e4253b7844577199bd59f9983def3..b0ac895e5ac9af2c5da973e93dbb2e778182b145 100644 (file)
 #ifdef DEBUG
 #define DPRINTK(fmt, args...)  printk(KERN_DEBUG "atyfb: " fmt, ## args)
 #else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...)  no_printk(fmt, ##args)
 #endif
 
 #define PRINTKI(fmt, args...)  printk(KERN_INFO "atyfb: " fmt, ## args)
@@ -3819,9 +3819,9 @@ static int __init atyfb_setup(char *options)
 
        while ((this_opt = strsep(&options, ",")) != NULL) {
                if (!strncmp(this_opt, "noaccel", 7)) {
-                       noaccel = 1;
+                       noaccel = true;
                } else if (!strncmp(this_opt, "nomtrr", 6)) {
-                       nomtrr = 1;
+                       nomtrr = true;
                } else if (!strncmp(this_opt, "vram:", 5))
                        vram = simple_strtoul(this_opt + 5, NULL, 0);
                else if (!strncmp(this_opt, "pll:", 4))
index 38b61cdb5ca409c701f0188e4bb20067cb5b7024..9c4f1be856eca46aefa7eec184167b858d456633 100644 (file)
@@ -31,7 +31,6 @@
  *  more details.
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/nvram.h>
 #include <linux/adb.h>
 #include <linux/cuda.h>
+#ifdef CONFIG_PPC_PMAC
 #include <asm/prom.h>
 #include <asm/btext.h>
+#endif
 
 #include "macmodes.h"
 #include "controlfb.h"
 
+#if !defined(CONFIG_PPC_PMAC) || !defined(CONFIG_PPC32)
+#define invalid_vram_cache(addr)
+#undef in_8
+#undef out_8
+#undef in_le32
+#undef out_le32
+#define in_8(addr)             0
+#define out_8(addr, val)
+#define in_le32(addr)          0
+#define out_le32(addr, val)
+#define pgprot_cached_wthru(prot) (prot)
+#else
+static void invalid_vram_cache(void __force *addr)
+{
+       eieio();
+       dcbf(addr);
+       mb();
+       eieio();
+       dcbf(addr);
+       mb();
+}
+#endif
+
 struct fb_par_control {
        int     vmode, cmode;
        int     xres, yres;
@@ -117,38 +141,6 @@ struct fb_info_control {
 #define CNTRL_REG(INFO,REG) (&(((INFO)->control_regs->REG).r))
 
 
-/******************** Prototypes for exported functions ********************/
-/*
- * struct fb_ops
- */
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
-       struct fb_info *info);
-static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
-       u_int transp, struct fb_info *info);
-static int controlfb_blank(int blank_mode, struct fb_info *info);
-static int controlfb_mmap(struct fb_info *info,
-       struct vm_area_struct *vma);
-static int controlfb_set_par (struct fb_info *info);
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
-
-/******************** Prototypes for internal functions **********************/
-
-static void set_control_clock(unsigned char *params);
-static int init_control(struct fb_info_control *p);
-static void control_set_hardware(struct fb_info_control *p,
-       struct fb_par_control *par);
-static int control_of_init(struct device_node *dp);
-static void find_vram_size(struct fb_info_control *p);
-static int read_control_sense(struct fb_info_control *p);
-static int calc_clock_params(unsigned long clk, unsigned char *param);
-static int control_var_to_par(struct fb_var_screeninfo *var,
-       struct fb_par_control *par, const struct fb_info *fb_info);
-static inline void control_par_to_var(struct fb_par_control *par,
-       struct fb_var_screeninfo *var);
-static void control_init_info(struct fb_info *info, struct fb_info_control *p);
-static void control_cleanup(void);
-
-
 /************************** Internal variables *******************************/
 
 static struct fb_info_control *control_fb;
@@ -157,189 +149,6 @@ static int default_vmode __initdata = VMODE_NVRAM;
 static int default_cmode __initdata = CMODE_NVRAM;
 
 
-static const struct fb_ops controlfb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_check_var   = controlfb_check_var,
-       .fb_set_par     = controlfb_set_par,
-       .fb_setcolreg   = controlfb_setcolreg,
-       .fb_pan_display = controlfb_pan_display,
-       .fb_blank       = controlfb_blank,
-       .fb_mmap        = controlfb_mmap,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
-};
-
-
-/********************  The functions for controlfb_ops ********************/
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
-       struct device_node *dp;
-       int ret = -ENXIO;
-
-       dp = of_find_node_by_name(NULL, "control");
-       if (dp && !control_of_init(dp))
-               ret = 0;
-       of_node_put(dp);
-
-       return ret;
-}
-
-void cleanup_module(void)
-{
-       control_cleanup();
-}
-#endif
-
-/*
- * Checks a var structure
- */
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
-{
-       struct fb_par_control par;
-       int err;
-
-       err = control_var_to_par(var, &par, info);
-       if (err)
-               return err;     
-       control_par_to_var(&par, var);
-
-       return 0;
-}
-
-/*
- * Applies current var to display
- */
-static int controlfb_set_par (struct fb_info *info)
-{
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       struct fb_par_control par;
-       int err;
-
-       if((err = control_var_to_par(&info->var, &par, info))) {
-               printk (KERN_ERR "controlfb_set_par: error calling"
-                                " control_var_to_par: %d.\n", err);
-               return err;
-       }
-       
-       control_set_hardware(p, &par);
-
-       info->fix.visual = (p->par.cmode == CMODE_8) ?
-               FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-       info->fix.line_length = p->par.pitch;
-       info->fix.xpanstep = 32 >> p->par.cmode;
-       info->fix.ypanstep = 1;
-
-       return 0;
-}
-
-/*
- * Set screen start address according to var offset values
- */
-static inline void set_screen_start(int xoffset, int yoffset,
-       struct fb_info_control *p)
-{
-       struct fb_par_control *par = &p->par;
-
-       par->xoffset = xoffset;
-       par->yoffset = yoffset;
-       out_le32(CNTRL_REG(p,start_addr),
-                par->yoffset * par->pitch + (par->xoffset << par->cmode));
-}
-
-
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
-                                struct fb_info *info)
-{
-       unsigned int xoffset, hstep;
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       struct fb_par_control *par = &p->par;
-
-       /*
-        * make sure start addr will be 32-byte aligned
-        */
-       hstep = 0x1f >> par->cmode;
-       xoffset = (var->xoffset + hstep) & ~hstep;
-
-       if (xoffset+par->xres > par->vxres ||
-           var->yoffset+par->yres > par->vyres)
-               return -EINVAL;
-
-       set_screen_start(xoffset, var->yoffset, p);
-
-       return 0;
-}
-
-
-/*
- * Private mmap since we want to have a different caching on the framebuffer
- * for controlfb.
- * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
- */
-static int controlfb_mmap(struct fb_info *info,
-                       struct vm_area_struct *vma)
-{
-       unsigned long mmio_pgoff;
-       unsigned long start;
-       u32 len;
-
-       start = info->fix.smem_start;
-       len = info->fix.smem_len;
-       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
-       if (vma->vm_pgoff >= mmio_pgoff) {
-               if (info->var.accel_flags)
-                       return -EINVAL;
-               vma->vm_pgoff -= mmio_pgoff;
-               start = info->fix.mmio_start;
-               len = info->fix.mmio_len;
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       } else {
-               /* framebuffer */
-               vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
-       }
-
-       return vm_iomap_memory(vma, start, len);
-}
-
-static int controlfb_blank(int blank_mode, struct fb_info *info)
-{
-       struct fb_info_control *p =
-               container_of(info, struct fb_info_control, info);
-       unsigned ctrl;
-
-       ctrl = le32_to_cpup(CNTRL_REG(p,ctrl));
-       if (blank_mode > 0)
-               switch (blank_mode) {
-               case FB_BLANK_VSYNC_SUSPEND:
-                       ctrl &= ~3;
-                       break;
-               case FB_BLANK_HSYNC_SUSPEND:
-                       ctrl &= ~0x30;
-                       break;
-               case FB_BLANK_POWERDOWN:
-                       ctrl &= ~0x33;
-                       /* fall through */
-               case FB_BLANK_NORMAL:
-                       ctrl |= 0x400;
-                       break;
-               default:
-                       break;
-               }
-       else {
-               ctrl &= ~0x400;
-               ctrl |= 0x33;
-       }
-       out_le32(CNTRL_REG(p,ctrl), ctrl);
-
-       return 0;
-}
-
 static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                             u_int transp, struct fb_info *info)
 {
@@ -396,88 +205,31 @@ static void set_control_clock(unsigned char *params)
 #endif 
 }
 
-
 /*
- * finish off the driver initialization and register
+ * Set screen start address according to var offset values
  */
-static int __init init_control(struct fb_info_control *p)
+static inline void set_screen_start(int xoffset, int yoffset,
+       struct fb_info_control *p)
 {
-       int full, sense, vmode, cmode, vyres;
-       struct fb_var_screeninfo var;
-       int rc;
-       
-       printk(KERN_INFO "controlfb: ");
-
-       full = p->total_vram == 0x400000;
+       struct fb_par_control *par = &p->par;
 
-       /* Try to pick a video mode out of NVRAM if we have one. */
-       cmode = default_cmode;
-       if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
-               cmode = nvram_read_byte(NV_CMODE);
-       if (cmode < CMODE_8 || cmode > CMODE_32)
-               cmode = CMODE_8;
+       par->xoffset = xoffset;
+       par->yoffset = yoffset;
+       out_le32(CNTRL_REG(p,start_addr),
+                par->yoffset * par->pitch + (par->xoffset << par->cmode));
+}
 
-       vmode = default_vmode;
-       if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
-               vmode = nvram_read_byte(NV_VMODE);
-       if (vmode < 1 || vmode > VMODE_MAX ||
-           control_mac_modes[vmode - 1].m[full] < cmode) {
-               sense = read_control_sense(p);
-               printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
-               vmode = mac_map_monitor_sense(sense);
-               if (control_mac_modes[vmode - 1].m[full] < 0)
-                       vmode = VMODE_640_480_60;
-               cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
-       }
+#define RADACAL_WRITE(a,d) \
+       out_8(&p->cmap_regs->addr, (a)); \
+       out_8(&p->cmap_regs->dat,   (d))
 
-       /* Initialize info structure */
-       control_init_info(&p->info, p);
-
-       /* Setup default var */
-       if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
-               /* This shouldn't happen! */
-               printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
-try_again:
-               vmode = VMODE_640_480_60;
-               cmode = CMODE_8;
-               if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
-                       printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
-                       return -ENXIO;
-               }
-               printk(KERN_INFO "controlfb: ");
-       }
-       printk("using video mode %d and color mode %d.\n", vmode, cmode);
-
-       vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
-       if (vyres > var.yres)
-               var.yres_virtual = vyres;
-
-       /* Apply default var */
-       var.activate = FB_ACTIVATE_NOW;
-       rc = fb_set_var(&p->info, &var);
-       if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
-               goto try_again;
-
-       /* Register with fbdev layer */
-       if (register_framebuffer(&p->info) < 0)
-               return -ENXIO;
-
-       fb_info(&p->info, "control display adapter\n");
-
-       return 0;
-}
-
-#define RADACAL_WRITE(a,d) \
-       out_8(&p->cmap_regs->addr, (a)); \
-       out_8(&p->cmap_regs->dat,   (d))
-
-/* Now how about actually saying, Make it so! */
-/* Some things in here probably don't need to be done each time. */
-static void control_set_hardware(struct fb_info_control *p, struct fb_par_control *par)
-{
-       struct control_regvals  *r;
-       volatile struct preg    __iomem *rp;
-       int                     i, cmode;
+/* Now how about actually saying, Make it so! */
+/* Some things in here probably don't need to be done each time. */
+static void control_set_hardware(struct fb_info_control *p, struct fb_par_control *par)
+{
+       struct control_regvals  *r;
+       volatile struct preg    __iomem *rp;
+       int                     i, cmode;
 
        if (PAR_EQUAL(&p->par, par)) {
                /*
@@ -528,67 +280,6 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
 #endif /* CONFIG_BOOTX_TEXT */
 }
 
-
-/*
- * Parse user specified options (`video=controlfb:')
- */
-static void __init control_setup(char *options)
-{
-       char *this_opt;
-
-       if (!options || !*options)
-               return;
-
-       while ((this_opt = strsep(&options, ",")) != NULL) {
-               if (!strncmp(this_opt, "vmode:", 6)) {
-                       int vmode = simple_strtoul(this_opt+6, NULL, 0);
-                       if (vmode > 0 && vmode <= VMODE_MAX &&
-                           control_mac_modes[vmode - 1].m[1] >= 0)
-                               default_vmode = vmode;
-               } else if (!strncmp(this_opt, "cmode:", 6)) {
-                       int depth = simple_strtoul(this_opt+6, NULL, 0);
-                       switch (depth) {
-                        case CMODE_8:
-                        case CMODE_16:
-                        case CMODE_32:
-                               default_cmode = depth;
-                               break;
-                        case 8:
-                               default_cmode = CMODE_8;
-                               break;
-                        case 15:
-                        case 16:
-                               default_cmode = CMODE_16;
-                               break;
-                        case 24:
-                        case 32:
-                               default_cmode = CMODE_32;
-                               break;
-                       }
-               }
-       }
-}
-
-static int __init control_init(void)
-{
-       struct device_node *dp;
-       char *option = NULL;
-       int ret = -ENXIO;
-
-       if (fb_get_options("controlfb", &option))
-               return -ENODEV;
-       control_setup(option);
-
-       dp = of_find_node_by_name(NULL, "control");
-       if (dp && !control_of_init(dp))
-               ret = 0;
-       of_node_put(dp);
-
-       return ret;
-}
-
-module_init(control_init);
-
 /* Work out which banks of VRAM we have installed. */
 /* danj: I guess the card just ignores writes to nonexistant VRAM... */
 
@@ -605,12 +296,7 @@ static void __init find_vram_size(struct fb_info_control *p)
 
        out_8(&p->frame_buffer[0x600000], 0xb3);
        out_8(&p->frame_buffer[0x600001], 0x71);
-       asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0x600000])
-                                       : "memory" );
-       mb();
-       asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0x600000])
-                                       : "memory" );
-       mb();
+       invalid_vram_cache(&p->frame_buffer[0x600000]);
 
        bank2 = (in_8(&p->frame_buffer[0x600000]) == 0xb3)
                && (in_8(&p->frame_buffer[0x600001]) == 0x71);
@@ -624,12 +310,7 @@ static void __init find_vram_size(struct fb_info_control *p)
 
        out_8(&p->frame_buffer[0], 0x5a);
        out_8(&p->frame_buffer[1], 0xc7);
-       asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0])
-                                       : "memory" );
-       mb();
-       asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0])
-                                       : "memory" );
-       mb();
+       invalid_vram_cache(&p->frame_buffer[0]);
 
        bank1 = (in_8(&p->frame_buffer[0]) == 0x5a)
                && (in_8(&p->frame_buffer[1]) == 0xc7);
@@ -663,78 +344,6 @@ static void __init find_vram_size(struct fb_info_control *p)
                        (bank1 + bank2) << 1, bank1 << 1, bank2 << 1);
 }
 
-
-/*
- * find "control" and initialize
- */
-static int __init control_of_init(struct device_node *dp)
-{
-       struct fb_info_control  *p;
-       struct resource         fb_res, reg_res;
-
-       if (control_fb) {
-               printk(KERN_ERR "controlfb: only one control is supported\n");
-               return -ENXIO;
-       }
-
-       if (of_pci_address_to_resource(dp, 2, &fb_res) ||
-           of_pci_address_to_resource(dp, 1, &reg_res)) {
-               printk(KERN_ERR "can't get 2 addresses for control\n");
-               return -ENXIO;
-       }
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-       control_fb = p; /* save it for cleanups */
-
-       /* Map in frame buffer and registers */
-       p->fb_orig_base = fb_res.start;
-       p->fb_orig_size = resource_size(&fb_res);
-       /* use the big-endian aperture (??) */
-       p->frame_buffer_phys = fb_res.start + 0x800000;
-       p->control_regs_phys = reg_res.start;
-       p->control_regs_size = resource_size(&reg_res);
-
-       if (!p->fb_orig_base ||
-           !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
-               p->fb_orig_base = 0;
-               goto error_out;
-       }
-       /* map at most 8MB for the frame buffer */
-       p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
-
-       if (!p->control_regs_phys ||
-           !request_mem_region(p->control_regs_phys, p->control_regs_size,
-           "controlfb regs")) {
-               p->control_regs_phys = 0;
-               goto error_out;
-       }
-       p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
-
-       p->cmap_regs_phys = 0xf301b000;  /* XXX not in prom? */
-       if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
-               p->cmap_regs_phys = 0;
-               goto error_out;
-       }
-       p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
-
-       if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
-               goto error_out;
-
-       find_vram_size(p);
-       if (!p->total_vram)
-               goto error_out;
-
-       if (init_control(p) < 0)
-               goto error_out;
-
-       return 0;
-
-error_out:
-       control_cleanup();
-       return -ENXIO;
-}
-
 /*
  * Get the monitor sense value.
  * Note that this can be called before calibrate_delay,
@@ -1019,6 +628,150 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
        var->pixclock >>= par->regvals.clock_params[2];
 }
 
+/********************  The functions for controlfb_ops ********************/
+
+/*
+ * Checks a var structure
+ */
+static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct fb_par_control par;
+       int err;
+
+       err = control_var_to_par(var, &par, info);
+       if (err)
+               return err;     
+       control_par_to_var(&par, var);
+
+       return 0;
+}
+
+/*
+ * Applies current var to display
+ */
+static int controlfb_set_par (struct fb_info *info)
+{
+       struct fb_info_control *p =
+               container_of(info, struct fb_info_control, info);
+       struct fb_par_control par;
+       int err;
+
+       if((err = control_var_to_par(&info->var, &par, info))) {
+               printk (KERN_ERR "controlfb_set_par: error calling"
+                                " control_var_to_par: %d.\n", err);
+               return err;
+       }
+       
+       control_set_hardware(p, &par);
+
+       info->fix.visual = (p->par.cmode == CMODE_8) ?
+               FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
+       info->fix.line_length = p->par.pitch;
+       info->fix.xpanstep = 32 >> p->par.cmode;
+       info->fix.ypanstep = 1;
+
+       return 0;
+}
+
+static int controlfb_pan_display(struct fb_var_screeninfo *var,
+                                struct fb_info *info)
+{
+       unsigned int xoffset, hstep;
+       struct fb_info_control *p =
+               container_of(info, struct fb_info_control, info);
+       struct fb_par_control *par = &p->par;
+
+       /*
+        * make sure start addr will be 32-byte aligned
+        */
+       hstep = 0x1f >> par->cmode;
+       xoffset = (var->xoffset + hstep) & ~hstep;
+
+       if (xoffset+par->xres > par->vxres ||
+           var->yoffset+par->yres > par->vyres)
+               return -EINVAL;
+
+       set_screen_start(xoffset, var->yoffset, p);
+
+       return 0;
+}
+
+static int controlfb_blank(int blank_mode, struct fb_info *info)
+{
+       struct fb_info_control __maybe_unused *p =
+               container_of(info, struct fb_info_control, info);
+       unsigned ctrl;
+
+       ctrl = in_le32(CNTRL_REG(p, ctrl));
+       if (blank_mode > 0)
+               switch (blank_mode) {
+               case FB_BLANK_VSYNC_SUSPEND:
+                       ctrl &= ~3;
+                       break;
+               case FB_BLANK_HSYNC_SUSPEND:
+                       ctrl &= ~0x30;
+                       break;
+               case FB_BLANK_POWERDOWN:
+                       ctrl &= ~0x33;
+                       /* fall through */
+               case FB_BLANK_NORMAL:
+                       ctrl |= 0x400;
+                       break;
+               default:
+                       break;
+               }
+       else {
+               ctrl &= ~0x400;
+               ctrl |= 0x33;
+       }
+       out_le32(CNTRL_REG(p,ctrl), ctrl);
+
+       return 0;
+}
+
+/*
+ * Private mmap since we want to have a different caching on the framebuffer
+ * for controlfb.
+ * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
+ */
+static int controlfb_mmap(struct fb_info *info,
+                       struct vm_area_struct *vma)
+{
+       unsigned long mmio_pgoff;
+       unsigned long start;
+       u32 len;
+
+       start = info->fix.smem_start;
+       len = info->fix.smem_len;
+       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+       if (vma->vm_pgoff >= mmio_pgoff) {
+               if (info->var.accel_flags)
+                       return -EINVAL;
+               vma->vm_pgoff -= mmio_pgoff;
+               start = info->fix.mmio_start;
+               len = info->fix.mmio_len;
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       } else {
+               /* framebuffer */
+               vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
+       }
+
+       return vm_iomap_memory(vma, start, len);
+}
+
+static const struct fb_ops controlfb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_check_var   = controlfb_check_var,
+       .fb_set_par     = controlfb_set_par,
+       .fb_setcolreg   = controlfb_setcolreg,
+       .fb_pan_display = controlfb_pan_display,
+       .fb_blank       = controlfb_blank,
+       .fb_mmap        = controlfb_mmap,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+};
+
 /*
  * Set misc info vars for this driver
  */
@@ -1045,6 +798,115 @@ static void __init control_init_info(struct fb_info *info, struct fb_info_contro
         info->fix.accel = FB_ACCEL_NONE;
 }
 
+/*
+ * Parse user specified options (`video=controlfb:')
+ */
+static void __init control_setup(char *options)
+{
+       char *this_opt;
+
+       if (!options || !*options)
+               return;
+
+       while ((this_opt = strsep(&options, ",")) != NULL) {
+               if (!strncmp(this_opt, "vmode:", 6)) {
+                       int vmode = simple_strtoul(this_opt+6, NULL, 0);
+                       if (vmode > 0 && vmode <= VMODE_MAX &&
+                           control_mac_modes[vmode - 1].m[1] >= 0)
+                               default_vmode = vmode;
+               } else if (!strncmp(this_opt, "cmode:", 6)) {
+                       int depth = simple_strtoul(this_opt+6, NULL, 0);
+                       switch (depth) {
+                        case CMODE_8:
+                        case CMODE_16:
+                        case CMODE_32:
+                               default_cmode = depth;
+                               break;
+                        case 8:
+                               default_cmode = CMODE_8;
+                               break;
+                        case 15:
+                        case 16:
+                               default_cmode = CMODE_16;
+                               break;
+                        case 24:
+                        case 32:
+                               default_cmode = CMODE_32;
+                               break;
+                       }
+               }
+       }
+}
+
+/*
+ * finish off the driver initialization and register
+ */
+static int __init init_control(struct fb_info_control *p)
+{
+       int full, sense, vmode, cmode, vyres;
+       struct fb_var_screeninfo var;
+       int rc;
+       
+       printk(KERN_INFO "controlfb: ");
+
+       full = p->total_vram == 0x400000;
+
+       /* Try to pick a video mode out of NVRAM if we have one. */
+       cmode = default_cmode;
+       if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
+               cmode = nvram_read_byte(NV_CMODE);
+       if (cmode < CMODE_8 || cmode > CMODE_32)
+               cmode = CMODE_8;
+
+       vmode = default_vmode;
+       if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
+               vmode = nvram_read_byte(NV_VMODE);
+       if (vmode < 1 || vmode > VMODE_MAX ||
+           control_mac_modes[vmode - 1].m[full] < cmode) {
+               sense = read_control_sense(p);
+               printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+               vmode = mac_map_monitor_sense(sense);
+               if (control_mac_modes[vmode - 1].m[full] < 0)
+                       vmode = VMODE_640_480_60;
+               cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
+       }
+
+       /* Initialize info structure */
+       control_init_info(&p->info, p);
+
+       /* Setup default var */
+       if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+               /* This shouldn't happen! */
+               printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
+try_again:
+               vmode = VMODE_640_480_60;
+               cmode = CMODE_8;
+               if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+                       printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
+                       return -ENXIO;
+               }
+               printk(KERN_INFO "controlfb: ");
+       }
+       printk("using video mode %d and color mode %d.\n", vmode, cmode);
+
+       vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
+       if (vyres > var.yres)
+               var.yres_virtual = vyres;
+
+       /* Apply default var */
+       var.activate = FB_ACTIVATE_NOW;
+       rc = fb_set_var(&p->info, &var);
+       if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
+               goto try_again;
+
+       /* Register with fbdev layer */
+       if (register_framebuffer(&p->info) < 0)
+               return -ENXIO;
+
+       fb_info(&p->info, "control display adapter\n");
+
+       return 0;
+}
 
 static void control_cleanup(void)
 {
@@ -1071,4 +933,93 @@ static void control_cleanup(void)
        kfree(p);
 }
 
+/*
+ * find "control" and initialize
+ */
+static int __init control_of_init(struct device_node *dp)
+{
+       struct fb_info_control  *p;
+       struct resource         fb_res, reg_res;
+
+       if (control_fb) {
+               printk(KERN_ERR "controlfb: only one control is supported\n");
+               return -ENXIO;
+       }
+
+       if (of_pci_address_to_resource(dp, 2, &fb_res) ||
+           of_pci_address_to_resource(dp, 1, &reg_res)) {
+               printk(KERN_ERR "can't get 2 addresses for control\n");
+               return -ENXIO;
+       }
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+       control_fb = p; /* save it for cleanups */
+
+       /* Map in frame buffer and registers */
+       p->fb_orig_base = fb_res.start;
+       p->fb_orig_size = resource_size(&fb_res);
+       /* use the big-endian aperture (??) */
+       p->frame_buffer_phys = fb_res.start + 0x800000;
+       p->control_regs_phys = reg_res.start;
+       p->control_regs_size = resource_size(&reg_res);
+
+       if (!p->fb_orig_base ||
+           !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
+               p->fb_orig_base = 0;
+               goto error_out;
+       }
+       /* map at most 8MB for the frame buffer */
+       p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
+
+       if (!p->control_regs_phys ||
+           !request_mem_region(p->control_regs_phys, p->control_regs_size,
+           "controlfb regs")) {
+               p->control_regs_phys = 0;
+               goto error_out;
+       }
+       p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
+
+       p->cmap_regs_phys = 0xf301b000;  /* XXX not in prom? */
+       if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
+               p->cmap_regs_phys = 0;
+               goto error_out;
+       }
+       p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
+
+       if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
+               goto error_out;
+
+       find_vram_size(p);
+       if (!p->total_vram)
+               goto error_out;
+
+       if (init_control(p) < 0)
+               goto error_out;
+
+       return 0;
+
+error_out:
+       control_cleanup();
+       return -ENXIO;
+}
+
+static int __init control_init(void)
+{
+       struct device_node *dp;
+       char *option = NULL;
+       int ret = -ENXIO;
+
+       if (fb_get_options("controlfb", &option))
+               return -ENODEV;
+       control_setup(option);
+
+       dp = of_find_node_by_name(NULL, "control");
+       if (dp && !control_of_init(dp))
+               ret = 0;
+       of_node_put(dp);
+
+       return ret;
+}
 
+device_initcall(control_init);
index 8e2e19f3bf44196d09cd8b6c33d60e2ad12ef013..d62a1e43864e60367ed07b67d6ab6fcfac0240b1 100644 (file)
@@ -44,7 +44,7 @@
 #ifdef DEBUG
 #define DPRINTK(fmt, args...) printk(fmt,## args)
 #else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) no_printk(fmt, ##args)
 #endif
 
 #define FBMON_FIX_HEADER  1
index 460826a7ad551d7efdbf9a1ba8a44c6698e6d63b..513f58f28b0fddcfdd44d6c3f7621464cc552786 100644 (file)
@@ -1160,12 +1160,14 @@ EXPORT_SYMBOL(cyber2000fb_detach);
 #define DDC_SDA_IN     (1 << 6)
 
 static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
+       __acquires(&cfb->reg_b0_lock)
 {
        spin_lock(&cfb->reg_b0_lock);
        cyber2000fb_writew(0x1bf, 0x3ce, cfb);
 }
 
 static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
+       __releases(&cfb->reg_b0_lock)
 {
        cyber2000fb_writew(0x0bf, 0x3ce, cfb);
        spin_unlock(&cfb->reg_b0_lock);
index aa7583d963ac99153f9848c463e7f6591bbb65c2..13bbf7fe13bf30ecbf388eb06a6de32744e4784f 100644 (file)
@@ -1966,13 +1966,13 @@ static int i810fb_setup(char *options)
        
        while ((this_opt = strsep(&options, ",")) != NULL) {
                if (!strncmp(this_opt, "mtrr", 4))
-                       mtrr = 1;
+                       mtrr = true;
                else if (!strncmp(this_opt, "accel", 5))
-                       accel = 1;
+                       accel = true;
                else if (!strncmp(this_opt, "extvga", 6))
-                       extvga = 1;
+                       extvga = true;
                else if (!strncmp(this_opt, "sync", 4))
-                       sync = 1;
+                       sync = true;
                else if (!strncmp(this_opt, "vram:", 5))
                        vram = (simple_strtoul(this_opt+5, NULL, 0));
                else if (!strncmp(this_opt, "voffset:", 8))
@@ -1998,7 +1998,7 @@ static int i810fb_setup(char *options)
                else if (!strncmp(this_opt, "vsync2:", 7))
                        vsync2 = simple_strtoul(this_opt+7, NULL, 0);
                else if (!strncmp(this_opt, "dcolor", 6))
-                       dcolor = 1;
+                       dcolor = true;
                else if (!strncmp(this_opt, "ddc3", 4))
                        ddc3 = true;
                else
index 370bf2553d43320ebb7036383a250b5f919cc43f..884b16efa7e8a93eae6d3b628e11d46142373d30 100644 (file)
@@ -172,6 +172,7 @@ struct imxfb_info {
        int                     num_modes;
 
        struct regulator        *lcd_pwr;
+       int                     lcd_pwr_enabled;
 };
 
 static const struct platform_device_id imxfb_devtype[] = {
@@ -801,16 +802,30 @@ static int imxfb_lcd_get_power(struct lcd_device *lcddev)
        return FB_BLANK_UNBLANK;
 }
 
+static int imxfb_regulator_set(struct imxfb_info *fbi, int enable)
+{
+       int ret;
+
+       if (enable == fbi->lcd_pwr_enabled)
+               return 0;
+
+       if (enable)
+               ret = regulator_enable(fbi->lcd_pwr);
+       else
+               ret = regulator_disable(fbi->lcd_pwr);
+
+       if (ret == 0)
+               fbi->lcd_pwr_enabled = enable;
+
+       return ret;
+}
+
 static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
 {
        struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
 
-       if (!IS_ERR(fbi->lcd_pwr)) {
-               if (power == FB_BLANK_UNBLANK)
-                       return regulator_enable(fbi->lcd_pwr);
-               else
-                       return regulator_disable(fbi->lcd_pwr);
-       }
+       if (!IS_ERR(fbi->lcd_pwr))
+               return imxfb_regulator_set(fbi, power == FB_BLANK_UNBLANK);
 
        return 0;
 }
index c15f8a57498ed15e9538c18f78c5ced10c42144e..ff8e321a22cefc0b48c56fa73a25cab3219ddf7a 100644 (file)
@@ -333,11 +333,9 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                         unsigned int *deltaarray)
 {
        unsigned int mnpcount;
-       unsigned int pixel_vco;
        const struct matrox_pll_limits* pi;
        struct matrox_pll_cache* ci;
 
-       pixel_vco = 0;
        switch (pll) {
                case M_PIXEL_PLL_A:
                case M_PIXEL_PLL_B:
@@ -420,7 +418,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                                
                                mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16;
                                mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8;
-                               pixel_vco = g450_mnp2vco(minfo, mnp);
                                matroxfb_DAC_unlock_irqrestore(flags);
                        }
                        pi = &minfo->limits.video;
@@ -441,25 +438,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
                        unsigned int delta;
 
                        vco = g450_mnp2vco(minfo, mnp);
-#if 0                  
-                       if (pll == M_VIDEO_PLL) {
-                               unsigned int big, small;
-
-                               if (vco < pixel_vco) {
-                                       small = vco;
-                                       big = pixel_vco;
-                               } else {
-                                       small = pixel_vco;
-                                       big = vco;
-                               }
-                               while (big > small) {
-                                       big >>= 1;
-                               }
-                               if (big == small) {
-                                       continue;
-                               }
-                       }
-#endif                 
                        delta = pll_freq_delta(fout, g450_vco2f(mnp, vco));
                        for (idx = mnpcount; idx > 0; idx--) {
                                /* == is important; due to nextpll algorithm we get
index f85ad25659e56e6e27695c0f1a949812d37fbb9b..759dee996af1b0cc3ad4a94b6b174bde5c33c68a 100644 (file)
@@ -86,7 +86,7 @@
 #ifdef DEBUG
 #define dprintk(X...)  printk(X)
 #else
-#define dprintk(X...)
+#define dprintk(X...)  no_printk(X)
 #endif
 
 #ifndef PCI_SS_VENDOR_ID_SIEMENS_NIXDORF
index 42569264801fcc6d9798c3c45593348a42154287..d40b806461ca5c6f063731f2b9d54fffde120f6e 100644 (file)
@@ -184,7 +184,6 @@ static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
 static void mb86290fb_imageblit(struct fb_info *info,
                                const struct fb_image *image)
 {
-       int mdr;
        u32 *cmd = NULL;
        void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
                       const struct fb_image *, struct fb_info *) = NULL;
@@ -196,7 +195,6 @@ static void mb86290fb_imageblit(struct fb_info *info,
        u16 dx = image->dx, dy = image->dy;
        int x2, y2, vxres, vyres;
 
-       mdr = (GDC_ROP_COPY << 9);
        x2 = image->dx + image->width;
        y2 = image->dy + image->height;
        vxres = info->var.xres_virtual;
index 4af28e4421e5dba8da6c0727d29c3b787d4e5309..603731a5a72ed6d2ae216d7424b543701cb3980c 100644 (file)
@@ -509,7 +509,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                          uint16_t h_start_width, uint16_t h_sync_width,
                          uint16_t h_end_width, uint16_t v_start_width,
                          uint16_t v_sync_width, uint16_t v_end_width,
-                         struct ipu_di_signal_cfg sig)
+                         const struct ipu_di_signal_cfg *sig)
 {
        unsigned long lock_flags;
        uint32_t reg;
@@ -591,17 +591,17 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 
        /* DI settings */
        old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
-       old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
-               sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
-               sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
+       old_conf |= sig->datamask_en << DI_D3_DATAMSK_SHIFT |
+               sig->clksel_en << DI_D3_CLK_SEL_SHIFT |
+               sig->clkidle_en << DI_D3_CLK_IDLE_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
 
        old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
-       old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
-               sig.clk_pol << DI_D3_CLK_POL_SHIFT |
-               sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
-               sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
-               sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
+       old_conf |= sig->data_pol << DI_D3_DATA_POL_SHIFT |
+               sig->clk_pol << DI_D3_CLK_POL_SHIFT |
+               sig->enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
+               sig->Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
+               sig->Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
        map = &di_mappings[mx3fb->disp_data_fmt];
@@ -855,7 +855,7 @@ static int __set_par(struct fb_info *fbi, bool lock)
                                   fbi->var.upper_margin,
                                   fbi->var.vsync_len,
                                   fbi->var.lower_margin +
-                                  fbi->var.vsync_len, sig_cfg) != 0) {
+                                  fbi->var.vsync_len, &sig_cfg) != 0) {
                        dev_err(fbi->device,
                                "mx3fb: Error initializing panel.\n");
                        return -EINVAL;
index e8a304f84ea848783a5a2628af7bee0d660d0e84..1a9d6242916e935f841a77d84d2e165e6222842f 100644 (file)
@@ -1247,7 +1247,7 @@ static ssize_t omapfb_show_caps_num(struct device *dev,
        size = 0;
        while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
                omapfb_get_caps(fbdev, plane, &caps);
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                        "plane#%d %#010x %#010x %#010x\n",
                        plane, caps.ctrl, caps.plane_color, caps.wnd_color);
                plane++;
@@ -1268,28 +1268,28 @@ static ssize_t omapfb_show_caps_text(struct device *dev,
        size = 0;
        while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
                omapfb_get_caps(fbdev, plane, &caps);
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 "plane#%d:\n", plane);
                for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (ctrl_caps[i].flag & caps.ctrl)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        " %s\n", ctrl_caps[i].name);
                }
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 " plane colors:\n");
                for (i = 0; i < ARRAY_SIZE(color_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (color_caps[i].flag & caps.plane_color)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        "  %s\n", color_caps[i].name);
                }
-               size += snprintf(&buf[size], PAGE_SIZE - size,
+               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                 " window colors:\n");
                for (i = 0; i < ARRAY_SIZE(color_caps) &&
                     size < PAGE_SIZE; i++) {
                        if (color_caps[i].flag & caps.wnd_color)
-                               size += snprintf(&buf[size], PAGE_SIZE - size,
+                               size += scnprintf(&buf[size], PAGE_SIZE - size,
                                        "  %s\n", color_caps[i].name);
                }
 
index ce37da85cc4587d980664774f91a4e260120d29c..4a16798b2ecd83efd390303813b92b22b4224d77 100644 (file)
@@ -557,11 +557,6 @@ u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq);
 
-u32 dispc_wb_get_framedone_irq(void)
-{
-       return DISPC_IRQ_FRAMEDONEWB;
-}
-
 bool dispc_mgr_go_busy(enum omap_channel channel)
 {
        return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -579,30 +574,6 @@ void dispc_mgr_go(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_go);
 
-bool dispc_wb_go_busy(void)
-{
-       return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
-}
-
-void dispc_wb_go(void)
-{
-       enum omap_plane plane = OMAP_DSS_WB;
-       bool enable, go;
-
-       enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
-
-       if (!enable)
-               return;
-
-       go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
-       if (go) {
-               DSSERR("GO bit not down for WB\n");
-               return;
-       }
-
-       REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
-}
-
 static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
 {
        dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
@@ -1028,13 +999,6 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
        }
 }
 
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
-{
-       enum omap_plane plane = OMAP_DSS_WB;
-
-       REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
-}
-
 static void dispc_ovl_set_burst_size(enum omap_plane plane,
                enum omap_burst_size burst_size)
 {
@@ -2805,74 +2769,6 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
 }
 EXPORT_SYMBOL(dispc_ovl_setup);
 
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
-               bool mem_to_mem, const struct omap_video_timings *mgr_timings)
-{
-       int r;
-       u32 l;
-       enum omap_plane plane = OMAP_DSS_WB;
-       const int pos_x = 0, pos_y = 0;
-       const u8 zorder = 0, global_alpha = 0;
-       const bool replication = false;
-       bool truncation;
-       int in_width = mgr_timings->x_res;
-       int in_height = mgr_timings->y_res;
-       enum omap_overlay_caps caps =
-               OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
-
-       DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
-               "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width,
-               in_height, wi->width, wi->height, wi->color_mode, wi->rotation,
-               wi->mirror);
-
-       r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
-               wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
-               wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
-               wi->pre_mult_alpha, global_alpha, wi->rotation_type,
-               replication, mgr_timings, mem_to_mem);
-
-       switch (wi->color_mode) {
-       case OMAP_DSS_COLOR_RGB16:
-       case OMAP_DSS_COLOR_RGB24P:
-       case OMAP_DSS_COLOR_ARGB16:
-       case OMAP_DSS_COLOR_RGBA16:
-       case OMAP_DSS_COLOR_RGB12U:
-       case OMAP_DSS_COLOR_ARGB16_1555:
-       case OMAP_DSS_COLOR_XRGB16_1555:
-       case OMAP_DSS_COLOR_RGBX16:
-               truncation = true;
-               break;
-       default:
-               truncation = false;
-               break;
-       }
-
-       /* setup extra DISPC_WB_ATTRIBUTES */
-       l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
-       l = FLD_MOD(l, truncation, 10, 10);     /* TRUNCATIONENABLE */
-       l = FLD_MOD(l, mem_to_mem, 19, 19);     /* WRITEBACKMODE */
-       if (mem_to_mem)
-               l = FLD_MOD(l, 1, 26, 24);      /* CAPTUREMODE */
-       else
-               l = FLD_MOD(l, 0, 26, 24);      /* CAPTUREMODE */
-       dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
-
-       if (mem_to_mem) {
-               /* WBDELAYCOUNT */
-               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
-       } else {
-               int wbdelay;
-
-               wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
-                       mgr_timings->vbp, 255);
-
-               /* WBDELAYCOUNT */
-               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
-       }
-
-       return r;
-}
-
 int dispc_ovl_enable(enum omap_plane plane, bool enable)
 {
        DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2903,16 +2799,6 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
 }
 EXPORT_SYMBOL(dispc_mgr_is_enabled);
 
-void dispc_wb_enable(bool enable)
-{
-       dispc_ovl_enable(OMAP_DSS_WB, enable);
-}
-
-bool dispc_wb_is_enabled(void)
-{
-       return dispc_ovl_enabled(OMAP_DSS_WB);
-}
-
 static void dispc_lcd_enable_signal_polarity(bool act_high)
 {
        if (!dss_has_feature(FEAT_LCDENABLEPOL))
index a2269008590ff96de2133396a6a724ee6698529f..21cfcbf74a6d9d10fec2fb970570e73ffdbe7b91 100644 (file)
@@ -89,17 +89,6 @@ enum dss_dsi_content_type {
        DSS_DSI_CONTENT_GENERIC,
 };
 
-enum dss_writeback_channel {
-       DSS_WB_LCD1_MGR =       0,
-       DSS_WB_LCD2_MGR =       1,
-       DSS_WB_TV_MGR =         2,
-       DSS_WB_OVL0 =           3,
-       DSS_WB_OVL1 =           4,
-       DSS_WB_OVL2 =           5,
-       DSS_WB_OVL3 =           6,
-       DSS_WB_LCD3_MGR =       7,
-};
-
 enum dss_pll_id {
        DSS_PLL_DSI1,
        DSS_PLL_DSI2,
@@ -403,15 +392,6 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
                struct dispc_clock_info *cinfo);
 void dispc_set_tv_pclk(unsigned long pclk);
 
-u32 dispc_wb_get_framedone_irq(void);
-bool dispc_wb_go_busy(void);
-void dispc_wb_go(void);
-void dispc_wb_enable(bool enable);
-bool dispc_wb_is_enabled(void);
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
-               bool mem_to_mem, const struct omap_video_timings *timings);
-
 u32 dispc_read_irqstatus(void);
 void dispc_clear_irqstatus(u32 mask);
 u32 dispc_read_irqenable(void);
index f81e2a46366dda0aaccae8bc831fafcc43e3efd4..d5404d56c922fdaed92f889d7c910261fe80a920 100644 (file)
@@ -209,49 +209,6 @@ static const struct venc_config venc_config_ntsc_trm = {
        .gen_ctrl                               = 0x00F90000,
 };
 
-static const struct venc_config venc_config_pal_bdghi = {
-       .f_control                              = 0,
-       .vidout_ctrl                            = 0,
-       .sync_ctrl                              = 0,
-       .hfltr_ctrl                             = 0,
-       .x_color                                = 0,
-       .line21                                 = 0,
-       .ln_sel                                 = 21,
-       .htrigger_vtrigger                      = 0,
-       .tvdetgp_int_start_stop_x               = 0x00140001,
-       .tvdetgp_int_start_stop_y               = 0x00010001,
-       .gen_ctrl                               = 0x00FB0000,
-
-       .llen                                   = 864-1,
-       .flens                                  = 625-1,
-       .cc_carr_wss_carr                       = 0x2F7625ED,
-       .c_phase                                = 0xDF,
-       .gain_u                                 = 0x111,
-       .gain_v                                 = 0x181,
-       .gain_y                                 = 0x140,
-       .black_level                            = 0x3e,
-       .blank_level                            = 0x3e,
-       .m_control                              = 0<<2 | 1<<1,
-       .bstamp_wss_data                        = 0x42,
-       .s_carr                                 = 0x2a098acb,
-       .l21__wc_ctl                            = 0<<13 | 0x16<<8 | 0<<0,
-       .savid__eavid                           = 0x06A70108,
-       .flen__fal                              = 23<<16 | 624<<0,
-       .lal__phase_reset                       = 2<<17 | 310<<0,
-       .hs_int_start_stop_x                    = 0x00920358,
-       .hs_ext_start_stop_x                    = 0x000F035F,
-       .vs_int_start_x                         = 0x1a7<<16,
-       .vs_int_stop_x__vs_int_start_y          = 0x000601A7,
-       .vs_int_stop_y__vs_ext_start_x          = 0x01AF0036,
-       .vs_ext_stop_x__vs_ext_start_y          = 0x27101af,
-       .vs_ext_stop_y                          = 0x05,
-       .avid_start_stop_x                      = 0x03530082,
-       .avid_start_stop_y                      = 0x0270002E,
-       .fid_int_start_x__fid_int_start_y       = 0x0005008A,
-       .fid_int_offset_y__fid_ext_start_x      = 0x002E0138,
-       .fid_ext_start_y__fid_ext_offset_y      = 0x01380005,
-};
-
 const struct omap_video_timings omap_dss_pal_timings = {
        .x_res          = 720,
        .y_res          = 574,
index 4a5db170ef59d82de4721d3fb04b7cd5cabb8424..2d39dbfa742e77abd11e50ae8bb0f05d026f027b 100644 (file)
@@ -147,11 +147,11 @@ static ssize_t show_overlays(struct device *dev,
                        if (ovl == fbdev->overlays[ovlnum])
                                break;
 
-               l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+               l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
                                t == 0 ? "" : ",", ovlnum);
        }
 
-       l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+       l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
 
        omapfb_unlock(fbdev);
        unlock_fb_info(fbi);
@@ -328,11 +328,11 @@ static ssize_t show_overlays_rotate(struct device *dev,
        lock_fb_info(fbi);
 
        for (t = 0; t < ofbi->num_overlays; t++) {
-               l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+               l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
                                t == 0 ? "" : ",", ofbi->rotation[t]);
        }
 
-       l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+       l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
 
        unlock_fb_info(fbi);
 
index fe2cadeb1b66429159529ef4d77ab7272c3934ea..c7c98d8e235928f538e04f805b7fe0faa3daf358 100644 (file)
@@ -54,7 +54,7 @@
 #define DPRINTK(a, b...)       \
        printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
 #else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...)       no_printk(a, ##b)
 #endif
 
 #define PM2_PIXMAP_SIZE        (1600 * 4)
index 2f5e23c8f8ec2bf0d1473ce8384932b9d3775764..7497bd36334c015f07c4b78de6eba81943d43944 100644 (file)
@@ -44,7 +44,7 @@
 #define DPRINTK(a, b...)       \
        printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
 #else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...)       no_printk(a, ##b)
 #endif
 
 #define PM3_PIXMAP_SIZE        (2048 * 4)
@@ -306,7 +306,7 @@ static void pm3fb_init_engine(struct fb_info *info)
                                           PM3PixelSize_GLOBAL_32BIT);
                        break;
                default:
-                       DPRINTK(1, "Unsupported depth %d\n",
+                       DPRINTK("Unsupported depth %d\n",
                                info->var.bits_per_pixel);
                        break;
                }
@@ -349,8 +349,8 @@ static void pm3fb_init_engine(struct fb_info *info)
                                           (1 << 10) | (0 << 3));
                        break;
                default:
-                       DPRINTK(1, "Unsupported depth %d\n",
-                               info->current_par->depth);
+                       DPRINTK("Unsupported depth %d\n",
+                               info->var.bits_per_pixel);
                        break;
                }
        }
index aef8a3042590dd367f78b3081819853b7e63f301..eedfbd3572a8faa41db5a80c2704e7fba5174ff1 100644 (file)
@@ -557,12 +557,11 @@ static const struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int pxa168fb_init_mode(struct fb_info *info,
+static void pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
        struct fb_var_screeninfo *var = &info->var;
-       int ret = 0;
        u32 total_w, total_h, refresh;
        u64 div_result;
        const struct fb_videomode *m;
@@ -593,8 +592,6 @@ static int pxa168fb_init_mode(struct fb_info *info,
        div_result = 1000000000000ll;
        do_div(div_result, total_w * total_h * refresh);
        var->pixclock = (u32)div_result;
-
-       return ret;
 }
 
 static int pxa168fb_probe(struct platform_device *pdev)
index 0601c13f21050970f7112b6f9be52034827a343d..08c9ee46978ef6d7c2cae8c0cd5e65d66d82712d 100644 (file)
@@ -1343,24 +1343,6 @@ int CalcStateExt
 /*
  * Load fixed function state and pre-calculated/stored state.
  */
-#if 0
-#define LOAD_FIXED_STATE(tbl,dev)                                       \
-    for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
-        chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
-#define LOAD_FIXED_STATE_8BPP(tbl,dev)                                  \
-    for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++)            \
-        chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
-#define LOAD_FIXED_STATE_15BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
-#define LOAD_FIXED_STATE_16BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1]
-#define LOAD_FIXED_STATE_32BPP(tbl,dev)                                 \
-    for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++)           \
-        chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1]
-#endif
-
 #define LOAD_FIXED_STATE(tbl,dev)                                       \
     for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
         NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1])
index eaea8c37375348a666e6bd60cc9b149cfe88af6e..4541afcf9386ec25d640772de014c02d3f4d2869 100644 (file)
@@ -721,9 +721,7 @@ static void s1d13xxxfb_fetch_hw_state(struct fb_info *info)
                xres, yres, xres_virtual, yres_virtual, is_color, is_dual, is_tft);
 }
 
-
-static int
-s1d13xxxfb_remove(struct platform_device *pdev)
+static void __s1d13xxxfb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
        struct s1d13xxxfb_par *par = NULL;
@@ -749,6 +747,14 @@ s1d13xxxfb_remove(struct platform_device *pdev)
                           resource_size(&pdev->resource[0]));
        release_mem_region(pdev->resource[1].start,
                           resource_size(&pdev->resource[1]));
+}
+
+static int s1d13xxxfb_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+
+       unregister_framebuffer(info);
+       __s1d13xxxfb_remove(pdev);
        return 0;
 }
 
@@ -895,7 +901,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
        return 0;
 
 bail:
-       s1d13xxxfb_remove(pdev);
+       __s1d13xxxfb_remove(pdev);
        return ret;
 
 }
index 2d285cc384cfd8b6732daa5b573d5c4757d3a8f0..3e6e13f7a831968c59edc1e136733c3618f6cebf 100644 (file)
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
@@ -799,8 +799,8 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
        writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
        writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);
 
-       if (machine_is_shannon())
-               gpio_set_value(SHANNON_GPIO_DISP_EN, 1);
+       if (fbi->shannon_lcden)
+               gpiod_set_value(fbi->shannon_lcden, 1);
 
        dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
        dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
@@ -817,8 +817,8 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
 
        dev_dbg(fbi->dev, "Disabling LCD controller\n");
 
-       if (machine_is_shannon())
-               gpio_set_value(SHANNON_GPIO_DISP_EN, 0);
+       if (fbi->shannon_lcden)
+               gpiod_set_value(fbi->shannon_lcden, 0);
 
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1173,12 +1173,10 @@ static int sa1100fb_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (machine_is_shannon()) {
-               ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
-                       GPIOF_OUT_INIT_LOW, "display enable");
-               if (ret)
-                       return ret;
-       }
+       fbi->shannon_lcden = gpiod_get_optional(&pdev->dev, "shannon-lcden",
+                                               GPIOD_OUT_LOW);
+       if (IS_ERR(fbi->shannon_lcden))
+               return PTR_ERR(fbi->shannon_lcden);
 
        /* Initialize video memory */
        ret = sa1100fb_map_video_memory(fbi);
index d0aa33b0b88a8c07c0f94450fec8cdcf9458c4f0..b4363444fa5dd35b2453a212f80e069d2f04cc02 100644 (file)
@@ -10,6 +10,8 @@
  * for more details.
  */
 
+struct gpio_desc;
+
 #define LCCR0           0x0000          /* LCD Control Reg. 0 */
 #define LCSR            0x0004          /* LCD Status Reg. */
 #define DBAR1           0x0010          /* LCD DMA Base Address Reg. channel 1 */
@@ -33,6 +35,7 @@ struct sa1100fb_info {
        struct device           *dev;
        const struct sa1100fb_rgb *rgb[NR_RGB];
        void __iomem            *base;
+       struct gpio_desc        *shannon_lcden;
 
        /*
         * These are the addresses we mapped
index aba04afe712d02afdddc3c2dc7d1b736e2a7ae2e..3314d5b6b43ba6e0c812ebf551669aaee41da307 100644 (file)
@@ -21,7 +21,7 @@
 #ifdef SAVAGEFB_DEBUG
 # define DBG(x)                printk (KERN_DEBUG "savagefb: %s\n", (x));
 #else
-# define DBG(x)
+# define DBG(x)                no_printk(x)
 # define SavagePrintRegs(...)
 #endif
 
index 12fa1050f3eb9d54f2673e46425e6e53a0652707..8e06ba912d60adc8ef4dbbda480e2f348ae674c9 100644 (file)
@@ -12,8 +12,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
 #include <linux/pwm.h>
 #include <linux/uaccess.h>
 #include <linux/regulator/consumer.h>
@@ -49,8 +48,6 @@
 static u_int refreshrate = REFRESHRATE;
 module_param(refreshrate, uint, 0);
 
-struct ssd1307fb_par;
-
 struct ssd1307fb_deviceinfo {
        u32 default_vcomh;
        u32 default_dclk_div;
@@ -80,7 +77,6 @@ struct ssd1307fb_par {
        u32 prechargep1;
        u32 prechargep2;
        struct pwm_device *pwm;
-       u32 pwm_period;
        struct gpio_desc *reset;
        struct regulator *vbat_reg;
        u32 vcomh;
@@ -298,9 +294,9 @@ static void ssd1307fb_deferred_io(struct fb_info *info,
 
 static int ssd1307fb_init(struct ssd1307fb_par *par)
 {
+       struct pwm_state pwmstate;
        int ret;
        u32 precharge, dclk, com_invdir, compins;
-       struct pwm_args pargs;
 
        if (par->device_info->need_pwm) {
                par->pwm = pwm_get(&par->client->dev, NULL);
@@ -309,21 +305,15 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
                        return PTR_ERR(par->pwm);
                }
 
-               /*
-                * FIXME: pwm_apply_args() should be removed when switching to
-                * the atomic PWM API.
-                */
-               pwm_apply_args(par->pwm);
-
-               pwm_get_args(par->pwm, &pargs);
+               pwm_init_state(par->pwm, &pwmstate);
+               pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+               pwm_apply_state(par->pwm, &pwmstate);
 
-               par->pwm_period = pargs.period;
                /* Enable the PWM */
-               pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
                pwm_enable(par->pwm);
 
                dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n",
-                       par->pwm->pwm, par->pwm_period);
+                       par->pwm->pwm, pwm_get_period(par->pwm));
        }
 
        /* Set initial contrast */
@@ -586,25 +576,19 @@ static const struct of_device_id ssd1307fb_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ssd1307fb_of_match);
 
-static int ssd1307fb_probe(struct i2c_client *client,
-                          const struct i2c_device_id *id)
+static int ssd1307fb_probe(struct i2c_client *client)
 {
+       struct device *dev = &client->dev;
        struct backlight_device *bl;
        char bl_name[12];
        struct fb_info *info;
-       struct device_node *node = client->dev.of_node;
        struct fb_deferred_io *ssd1307fb_defio;
        u32 vmem_size;
        struct ssd1307fb_par *par;
        void *vmem;
        int ret;
 
-       if (!node) {
-               dev_err(&client->dev, "No device tree data found!\n");
-               return -EINVAL;
-       }
-
-       info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
+       info = framebuffer_alloc(sizeof(struct ssd1307fb_par), dev);
        if (!info)
                return -ENOMEM;
 
@@ -612,67 +596,65 @@ static int ssd1307fb_probe(struct i2c_client *client,
        par->info = info;
        par->client = client;
 
-       par->device_info = of_device_get_match_data(&client->dev);
+       par->device_info = device_get_match_data(dev);
 
-       par->reset = devm_gpiod_get_optional(&client->dev, "reset",
-                                            GPIOD_OUT_LOW);
+       par->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(par->reset)) {
-               dev_err(&client->dev, "failed to get reset gpio: %ld\n",
+               dev_err(dev, "failed to get reset gpio: %ld\n",
                        PTR_ERR(par->reset));
                ret = PTR_ERR(par->reset);
                goto fb_alloc_error;
        }
 
-       par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
+       par->vbat_reg = devm_regulator_get_optional(dev, "vbat");
        if (IS_ERR(par->vbat_reg)) {
                ret = PTR_ERR(par->vbat_reg);
                if (ret == -ENODEV) {
                        par->vbat_reg = NULL;
                } else {
-                       dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
-                               ret);
+                       dev_err(dev, "failed to get VBAT regulator: %d\n", ret);
                        goto fb_alloc_error;
                }
        }
 
-       if (of_property_read_u32(node, "solomon,width", &par->width))
+       if (device_property_read_u32(dev, "solomon,width", &par->width))
                par->width = 96;
 
-       if (of_property_read_u32(node, "solomon,height", &par->height))
+       if (device_property_read_u32(dev, "solomon,height", &par->height))
                par->height = 16;
 
-       if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset))
+       if (device_property_read_u32(dev, "solomon,page-offset", &par->page_offset))
                par->page_offset = 1;
 
-       if (of_property_read_u32(node, "solomon,com-offset", &par->com_offset))
+       if (device_property_read_u32(dev, "solomon,com-offset", &par->com_offset))
                par->com_offset = 0;
 
-       if (of_property_read_u32(node, "solomon,prechargep1", &par->prechargep1))
+       if (device_property_read_u32(dev, "solomon,prechargep1", &par->prechargep1))
                par->prechargep1 = 2;
 
-       if (of_property_read_u32(node, "solomon,prechargep2", &par->prechargep2))
+       if (device_property_read_u32(dev, "solomon,prechargep2", &par->prechargep2))
                par->prechargep2 = 2;
 
-       if (!of_property_read_u8_array(node, "solomon,lookup-table",
-                                      par->lookup_table,
-                                      ARRAY_SIZE(par->lookup_table)))
+       if (!device_property_read_u8_array(dev, "solomon,lookup-table",
+                                          par->lookup_table,
+                                          ARRAY_SIZE(par->lookup_table)))
                par->lookup_table_set = 1;
 
-       par->seg_remap = !of_property_read_bool(node, "solomon,segment-no-remap");
-       par->com_seq = of_property_read_bool(node, "solomon,com-seq");
-       par->com_lrremap = of_property_read_bool(node, "solomon,com-lrremap");
-       par->com_invdir = of_property_read_bool(node, "solomon,com-invdir");
+       par->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap");
+       par->com_seq = device_property_read_bool(dev, "solomon,com-seq");
+       par->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap");
+       par->com_invdir = device_property_read_bool(dev, "solomon,com-invdir");
        par->area_color_enable =
-               of_property_read_bool(node, "solomon,area-color-enable");
-       par->low_power = of_property_read_bool(node, "solomon,low-power");
+               device_property_read_bool(dev, "solomon,area-color-enable");
+       par->low_power = device_property_read_bool(dev, "solomon,low-power");
 
        par->contrast = 127;
        par->vcomh = par->device_info->default_vcomh;
 
        /* Setup display timing */
-       if (of_property_read_u32(node, "solomon,dclk-div", &par->dclk_div))
+       if (device_property_read_u32(dev, "solomon,dclk-div", &par->dclk_div))
                par->dclk_div = par->device_info->default_dclk_div;
-       if (of_property_read_u32(node, "solomon,dclk-frq", &par->dclk_frq))
+       if (device_property_read_u32(dev, "solomon,dclk-frq", &par->dclk_frq))
                par->dclk_frq = par->device_info->default_dclk_frq;
 
        vmem_size = DIV_ROUND_UP(par->width, 8) * par->height;
@@ -680,15 +662,15 @@ static int ssd1307fb_probe(struct i2c_client *client,
        vmem = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(vmem_size));
        if (!vmem) {
-               dev_err(&client->dev, "Couldn't allocate graphical memory.\n");
+               dev_err(dev, "Couldn't allocate graphical memory.\n");
                ret = -ENOMEM;
                goto fb_alloc_error;
        }
 
-       ssd1307fb_defio = devm_kzalloc(&client->dev, sizeof(*ssd1307fb_defio),
+       ssd1307fb_defio = devm_kzalloc(dev, sizeof(*ssd1307fb_defio),
                                       GFP_KERNEL);
        if (!ssd1307fb_defio) {
-               dev_err(&client->dev, "Couldn't allocate deferred io.\n");
+               dev_err(dev, "Couldn't allocate deferred io.\n");
                ret = -ENOMEM;
                goto fb_alloc_error;
        }
@@ -726,8 +708,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
        if (par->vbat_reg) {
                ret = regulator_enable(par->vbat_reg);
                if (ret) {
-                       dev_err(&client->dev, "failed to enable VBAT: %d\n",
-                               ret);
+                       dev_err(dev, "failed to enable VBAT: %d\n", ret);
                        goto reset_oled_error;
                }
        }
@@ -738,17 +719,16 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
        ret = register_framebuffer(info);
        if (ret) {
-               dev_err(&client->dev, "Couldn't register the framebuffer\n");
+               dev_err(dev, "Couldn't register the framebuffer\n");
                goto panel_init_error;
        }
 
        snprintf(bl_name, sizeof(bl_name), "ssd1307fb%d", info->node);
-       bl = backlight_device_register(bl_name, &client->dev, par,
-                                      &ssd1307fb_bl_ops, NULL);
+       bl = backlight_device_register(bl_name, dev, par, &ssd1307fb_bl_ops,
+                                      NULL);
        if (IS_ERR(bl)) {
                ret = PTR_ERR(bl);
-               dev_err(&client->dev, "unable to register backlight device: %d\n",
-                       ret);
+               dev_err(dev, "unable to register backlight device: %d\n", ret);
                goto bl_init_error;
        }
 
@@ -756,7 +736,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
        bl->props.max_brightness = MAX_CONTRAST;
        info->bl_dev = bl;
 
-       dev_info(&client->dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
+       dev_info(dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
 
        return 0;
 
@@ -810,7 +790,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = {
 MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
 
 static struct i2c_driver ssd1307fb_driver = {
-       .probe = ssd1307fb_probe,
+       .probe_new = ssd1307fb_probe,
        .remove = ssd1307fb_remove,
        .id_table = ssd1307fb_i2c_id,
        .driver = {
index 07905d385949ec3321df82c0b410ff9b75aa4fac..5b014b479f831b8468886ceacb1c1d70cae165fb 100644 (file)
@@ -64,9 +64,9 @@ static const struct usb_device_id id_table[] = {
 MODULE_DEVICE_TABLE(usb, id_table);
 
 /* module options */
-static bool console = 1; /* Allow fbcon to open framebuffer */
-static bool fb_defio = 1;  /* Detect mmap writes using page faults */
-static bool shadow = 1; /* Optionally disable shadow framebuffer */
+static bool console = true; /* Allow fbcon to open framebuffer */
+static bool fb_defio = true;  /* Detect mmap writes using page faults */
+static bool shadow = true; /* Optionally disable shadow framebuffer */
 static int pixel_limit; /* Optionally force a pixel resolution limit */
 
 struct dlfb_deferred_free {
index 53d08d1b56f553857bd7554097af47b72ed046a3..bee29aadc6460e40c73a4d3a6c38b7a34724ac1b 100644 (file)
@@ -45,7 +45,7 @@ static const struct fb_fix_screeninfo uvesafb_fix = {
 };
 
 static int mtrr                = 3;    /* enable mtrr by default */
-static bool blank      = 1;    /* enable blanking by default */
+static bool blank      = true; /* enable blanking by default */
 static int ypan                = 1;    /* 0: scroll, 1: ypan, 2: ywrap */
 static bool pmi_setpal = true; /* use PMI for palette changes */
 static bool nocrtc;            /* ignore CRTC settings */
@@ -1560,7 +1560,7 @@ static ssize_t uvesafb_show_vbe_modes(struct device *dev,
        int ret = 0, i;
 
        for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                        "%dx%d-%d, 0x%.4x\n",
                        par->vbe_modes[i].x_res, par->vbe_modes[i].y_res,
                        par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
@@ -1824,19 +1824,19 @@ static int uvesafb_setup(char *options)
                else if (!strcmp(this_opt, "ywrap"))
                        ypan = 2;
                else if (!strcmp(this_opt, "vgapal"))
-                       pmi_setpal = 0;
+                       pmi_setpal = false;
                else if (!strcmp(this_opt, "pmipal"))
-                       pmi_setpal = 1;
+                       pmi_setpal = true;
                else if (!strncmp(this_opt, "mtrr:", 5))
                        mtrr = simple_strtoul(this_opt+5, NULL, 0);
                else if (!strcmp(this_opt, "nomtrr"))
                        mtrr = 0;
                else if (!strcmp(this_opt, "nocrtc"))
-                       nocrtc = 1;
+                       nocrtc = true;
                else if (!strcmp(this_opt, "noedid"))
-                       noedid = 1;
+                       noedid = true;
                else if (!strcmp(this_opt, "noblank"))
-                       blank = 0;
+                       blank = false;
                else if (!strncmp(this_opt, "vtotal:", 7))
                        vram_total = simple_strtoul(this_opt + 7, NULL, 0);
                else if (!strncmp(this_opt, "vremap:", 7))
index 4d20c4603e5ae7529545c1b60654cddbda28b954..8425afe37d7c08890491eb72017fa32b88f71feb 100644 (file)
@@ -331,7 +331,7 @@ int __init valkyriefb_init(void)
                struct resource r;
 
                dp = of_find_node_by_name(NULL, "valkyrie");
-               if (dp == 0)
+               if (!dp)
                        return 0;
 
                if (of_address_to_resource(dp, 0, &r)) {
@@ -345,7 +345,7 @@ int __init valkyriefb_init(void)
 #endif /* ppc (!CONFIG_MAC) */
 
        p = kzalloc(sizeof(*p), GFP_ATOMIC);
-       if (p == 0)
+       if (!p)
                return -ENOMEM;
 
        /* Map in frame buffer and registers */
index a1fe24ea869b8b287bd93dd751743709f046b214..df6de5a9dd4cd9981ac11cc70e38e4309dde7cc0 100644 (file)
@@ -32,6 +32,7 @@
 struct vesafb_par {
        u32 pseudo_palette[256];
        int wc_cookie;
+       struct resource *region;
 };
 
 static struct fb_var_screeninfo vesafb_defined = {
@@ -411,7 +412,7 @@ static int vesafb_probe(struct platform_device *dev)
 
        /* request failure does not faze us, as vgacon probably has this
         * region already (FIXME) */
-       request_region(0x3c0, 32, "vesafb");
+       par->region = request_region(0x3c0, 32, "vesafb");
 
        if (mtrr == 3) {
                unsigned int temp_size = size_total;
@@ -439,7 +440,7 @@ static int vesafb_probe(struct platform_device *dev)
                       "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
                        vesafb_fix.smem_len, vesafb_fix.smem_start);
                err = -EIO;
-               goto err;
+               goto err_release_region;
        }
 
        printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
@@ -458,19 +459,22 @@ static int vesafb_probe(struct platform_device *dev)
 
        if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
                err = -ENOMEM;
-               goto err;
+               goto err_release_region;
        }
        if (register_framebuffer(info)<0) {
                err = -EINVAL;
                fb_dealloc_cmap(&info->cmap);
-               goto err;
+               goto err_release_region;
        }
        fb_info(info, "%s frame buffer device\n", info->fix.id);
        return 0;
-err:
+err_release_region:
        arch_phys_wc_del(par->wc_cookie);
        if (info->screen_base)
                iounmap(info->screen_base);
+       if (par->region)
+               release_region(0x3c0, 32);
+err:
        framebuffer_release(info);
        release_mem_region(vesafb_fix.smem_start, size_total);
        return err;
@@ -481,6 +485,8 @@ static int vesafb_remove(struct platform_device *pdev)
        struct fb_info *info = platform_get_drvdata(pdev);
 
        unregister_framebuffer(info);
+       if (((struct vesafb_par *)(info->par))->region)
+               release_region(0x3c0, 32);
        framebuffer_release(info);
 
        return 0;
index 6a320bd76936daede721ce44fa884f4a032e53fd..80fdfe4171c539409be29d86ecefa7f17453ac1b 100644 (file)
@@ -7,6 +7,8 @@
 #ifndef __DEBUG_H__
 #define __DEBUG_H__
 
+#include <linux/printk.h>
+
 #ifndef VIAFB_DEBUG
 #define VIAFB_DEBUG 0
 #endif
 #if VIAFB_DEBUG
 #define DEBUG_MSG(f, a...)   printk(f, ## a)
 #else
-#define DEBUG_MSG(f, a...)
+#define DEBUG_MSG(f, a...)   no_printk(f, ## a)
 #endif
 
 #define VIAFB_WARN 0
 #if VIAFB_WARN
 #define WARN_MSG(f, a...)   printk(f, ## a)
 #else
-#define WARN_MSG(f, a...)
+#define WARN_MSG(f, a...)   no_printk(f, ## a)
 #endif
 
 #endif /* __DEBUG_H__ */
index 852673c40a2f35d3100f4a9eaf48fa5dbc8c4baa..22deb340a0484fe394d96ac0c5c24a9b497de235 100644 (file)
@@ -1144,7 +1144,7 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
                if (value != NULL) {
                        if (kstrtou8(value, 0, &reg_val) < 0)
                                return -EINVAL;
-                       DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i,
+                       DEBUG_MSG(KERN_INFO "DVP0:reg_val[%lu]=:%x\n", i,
                                  reg_val);
                        switch (i) {
                        case 0:
index f744479dc7df63356af8f399ddabda40193cd748..c61476247ba8ddbbdc887acafef00eda91f7ff27 100644 (file)
@@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
                    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
                        for (i = 0; i < 256; i++)
                                vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+               fallthrough;
        case FB_BLANK_UNBLANK:
                if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
                    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
index 2d6e2738b792c584abefe7492881ab47027f4c38..d96ab28f8ce4ae54b384c3a4d8f2b9bfa085187e 100644 (file)
@@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
                memsize=par->mach->mem->size;
                memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
                vfree(par->saved_extmem);
+               par->saved_extmem = NULL;
        }
        if (par->saved_intmem) {
                memsize=MEM_INT_SIZE;
@@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
                else
                        memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
                vfree(par->saved_intmem);
+               par->saved_intmem = NULL;
        }
 }
 
index 856a8c4e84a25a895d355ace01da87dbe201edfb..e70792b3e367488667df008803bdf66d8c358ead 100644 (file)
@@ -1768,20 +1768,21 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
 }
 
 /**
- * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
+ * hdmi_drm_infoframe_unpack_only() - unpack binary buffer of CTA-861-G DRM
+ *                                    infoframe DataBytes to a HDMI DRM
+ *                                    infoframe
  * @frame: HDMI DRM infoframe
  * @buffer: source buffer
  * @size: size of buffer
  *
- * Unpacks the information contained in binary @buffer into a structured
- * @frame of the HDMI Dynamic Range and Mastering (DRM) information frame.
- * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
- * specification.
+ * Unpacks CTA-861-G DRM infoframe DataBytes contained in the binary @buffer
+ * into a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe.
  *
  * Returns 0 on success or a negative error code on failure.
  */
-static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
-                                    const void *buffer, size_t size)
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+                                  const void *buffer, size_t size)
 {
        const u8 *ptr = buffer;
        const u8 *temp;
@@ -1790,23 +1791,13 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
        int ret;
        int i;
 
-       if (size < HDMI_INFOFRAME_SIZE(DRM))
-               return -EINVAL;
-
-       if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
-           ptr[1] != 1 ||
-           ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
-               return -EINVAL;
-
-       if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+       if (size < HDMI_DRM_INFOFRAME_SIZE)
                return -EINVAL;
 
        ret = hdmi_drm_infoframe_init(frame);
        if (ret)
                return ret;
 
-       ptr += HDMI_INFOFRAME_HEADER_SIZE;
-
        frame->eotf = ptr[0] & 0x7;
        frame->metadata_type = ptr[1] & 0x7;
 
@@ -1814,7 +1805,7 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
        for (i = 0; i < 3; i++) {
                x_lsb = *temp++;
                x_msb = *temp++;
-               frame->display_primaries[i].x =  (x_msb << 8) | x_lsb;
+               frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
                y_lsb = *temp++;
                y_msb = *temp++;
                frame->display_primaries[i].y = (y_msb << 8) | y_lsb;
@@ -1830,6 +1821,42 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
 
        return 0;
 }
+EXPORT_SYMBOL(hdmi_drm_infoframe_unpack_only);
+
+/**
+ * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the CTA-861-G DRM infoframe contained in the binary @buffer into
+ * a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe. It also verifies the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
+                                    const void *buffer, size_t size)
+{
+       const u8 *ptr = buffer;
+       int ret;
+
+       if (size < HDMI_INFOFRAME_SIZE(DRM))
+               return -EINVAL;
+
+       if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
+           ptr[1] != 1 ||
+           ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
+               return -EINVAL;
+
+       if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+               return -EINVAL;
+
+       ret = hdmi_drm_infoframe_unpack_only(frame, ptr + HDMI_INFOFRAME_HEADER_SIZE,
+                                            size - HDMI_INFOFRAME_HEADER_SIZE);
+       return ret;
+}
 
 /**
  * hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
index 3ed5dee899fd16af8f3facdcc585e1e93ee47abd..eb259c2547af782bfecab9a862c809bb6a406580 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
 
 #ifndef _DRM_CLIENT_H_
 #define _DRM_CLIENT_H_
@@ -188,6 +188,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
        drm_for_each_connector_iter(connector, iter) \
                if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
 
-int drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_minor *minor);
 
 #endif
index 19ae6bb5c85be15d15b7589bf17591332bc0e653..fd543d1db9b2c5042dba43b8d6d6676098210d09 100644 (file)
@@ -1617,9 +1617,9 @@ struct drm_tile_group {
 };
 
 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
-                                                 char topology[8]);
+                                                 const char topology[8]);
 struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
-                                              char topology[8]);
+                                              const char topology[8]);
 void drm_mode_put_tile_group(struct drm_device *dev,
                             struct drm_tile_group *tg);
 
index 7501e323d38370fa9410520b7a0618e092900eea..2188dc83957fd22beea320444a4ad1bda3285e4f 100644 (file)
@@ -80,18 +80,16 @@ struct drm_info_node {
 };
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_debugfs_create_files(const struct drm_info_list *files,
-                            int count, struct dentry *root,
-                            struct drm_minor *minor);
+void drm_debugfs_create_files(const struct drm_info_list *files,
+                             int count, struct dentry *root,
+                             struct drm_minor *minor);
 int drm_debugfs_remove_files(const struct drm_info_list *files,
                             int count, struct drm_minor *minor);
 #else
-static inline int drm_debugfs_create_files(const struct drm_info_list *files,
-                                          int count, struct dentry *root,
-                                          struct drm_minor *minor)
-{
-       return 0;
-}
+static inline void drm_debugfs_create_files(const struct drm_info_list *files,
+                                           int count, struct dentry *root,
+                                           struct drm_minor *minor)
+{}
 
 static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
                                           int count, struct drm_minor *minor)
index bb60a949f416ff1e962fddac4f73b45ee737eedb..a55874db9dd446d896476c3f0d429fc24ad6abe0 100644 (file)
@@ -67,15 +67,33 @@ struct drm_device {
        /** @dev: Device structure of bus-device */
        struct device *dev;
 
+       /**
+        * @managed:
+        *
+        * Managed resources linked to the lifetime of this &drm_device as
+        * tracked by @ref.
+        */
+       struct {
+               /** @managed.resources: managed resources list */
+               struct list_head resources;
+               /** @managed.final_kfree: pointer for final kfree() call */
+               void *final_kfree;
+               /** @managed.lock: protects @managed.resources */
+               spinlock_t lock;
+       } managed;
+
        /** @driver: DRM driver managing the device */
        struct drm_driver *driver;
 
        /**
         * @dev_private:
         *
-        * DRM driver private data. Instead of using this pointer it is
-        * recommended that drivers use drm_dev_init() and embed struct
-        * &drm_device in their larger per-device structure.
+        * DRM driver private data. This is deprecated and should be left set to
+        * NULL.
+        *
+        * Instead of using this pointer it is recommended that drivers use
+        * drm_dev_init() and embed struct &drm_device in their larger
+        * per-device structure.
         */
        void *dev_private;
 
index 9d3b745c3107763a84269eb5a48484a698bb4bdd..27bdd273fc4e7d340d72fea2cedf288f9437f054 100644 (file)
@@ -97,7 +97,7 @@ struct displayid_detailed_timing_block {
             (idx) + sizeof(struct displayid_block) <= (length) && \
             (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
             (block)->num_bytes > 0; \
-            (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
+            (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \
             (block) = (struct displayid_block *)&(displayid)[idx])
 
 #endif
index c6119e4c169aee3987e9fd6b2b697c51a532af04..2035ac44afde0b516f8e42250958cc1d7bda78da 100644 (file)
 #define DP_DSC_PEAK_THROUGHPUT              0x06B
 # define DP_DSC_THROUGHPUT_MODE_0_MASK      (0xf << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_SHIFT     0
-# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0
 # define DP_DSC_THROUGHPUT_MODE_0_340       (1 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_400       (2 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_450       (3 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_170       (15 << 0) /* 1.4a */
 # define DP_DSC_THROUGHPUT_MODE_1_MASK      (0xf << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_SHIFT     4
-# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
+# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0
 # define DP_DSC_THROUGHPUT_MODE_1_340       (1 << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_400       (2 << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_450       (3 << 4)
 # define DP_TEST_CRC_SUPPORTED             (1 << 5)
 # define DP_TEST_COUNT_MASK                0xf
 
-#define DP_TEST_PHY_PATTERN                 0x248
+#define DP_PHY_TEST_PATTERN                 0x248
+# define DP_PHY_TEST_PATTERN_SEL_MASK       0x7
+# define DP_PHY_TEST_PATTERN_NONE           0x0
+# define DP_PHY_TEST_PATTERN_D10_2          0x1
+# define DP_PHY_TEST_PATTERN_ERROR_COUNT    0x2
+# define DP_PHY_TEST_PATTERN_PRBS7          0x3
+# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM   0x4
+# define DP_PHY_TEST_PATTERN_CP2520         0x5
+
+#define DP_TEST_HBR2_SCRAMBLER_RESET        0x24A
 #define DP_TEST_80BIT_CUSTOM_PATTERN_7_0    0x250
 #define        DP_TEST_80BIT_CUSTOM_PATTERN_15_8   0x251
 #define        DP_TEST_80BIT_CUSTOM_PATTERN_23_16  0x252
@@ -1209,6 +1218,139 @@ struct dp_sdp {
 #define EDP_VSC_PSR_UPDATE_RFB         (1<<1)
 #define EDP_VSC_PSR_CRC_VALUES_VALID   (1<<2)
 
+/**
+ * enum dp_pixelformat - drm DP Pixel encoding formats
+ *
+ * This enum is used to indicate DP VSC SDP Pixel encoding formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ *
+ * @DP_PIXELFORMAT_RGB: RGB pixel encoding format
+ * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format
+ * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format
+ * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format
+ * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format
+ * @DP_PIXELFORMAT_RAW: RAW pixel encoding format
+ * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format
+ */
+enum dp_pixelformat {
+       DP_PIXELFORMAT_RGB = 0,
+       DP_PIXELFORMAT_YUV444 = 0x1,
+       DP_PIXELFORMAT_YUV422 = 0x2,
+       DP_PIXELFORMAT_YUV420 = 0x3,
+       DP_PIXELFORMAT_Y_ONLY = 0x4,
+       DP_PIXELFORMAT_RAW = 0x5,
+       DP_PIXELFORMAT_RESERVED = 0x6,
+};
+
+/**
+ * enum dp_colorimetry - drm DP Colorimetry formats
+ *
+ * This enum is used to indicate DP VSC SDP Colorimetry formats.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition.
+ *
+ * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
+ *                          ITU-R BT.601 colorimetry format
+ * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format
+ * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format
+ * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point
+ *                                 (scRGB (IEC 61966-2-2)) colorimetry format
+ * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format
+ * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format
+ * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format
+ * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format
+ * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format
+ * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format
+ * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format
+ * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format
+ * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format
+ * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format
+ */
+enum dp_colorimetry {
+       DP_COLORIMETRY_DEFAULT = 0,
+       DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1,
+       DP_COLORIMETRY_BT709_YCC = 0x1,
+       DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2,
+       DP_COLORIMETRY_XVYCC_601 = 0x2,
+       DP_COLORIMETRY_OPRGB = 0x3,
+       DP_COLORIMETRY_XVYCC_709 = 0x3,
+       DP_COLORIMETRY_DCI_P3_RGB = 0x4,
+       DP_COLORIMETRY_SYCC_601 = 0x4,
+       DP_COLORIMETRY_RGB_CUSTOM = 0x5,
+       DP_COLORIMETRY_OPYCC_601 = 0x5,
+       DP_COLORIMETRY_BT2020_RGB = 0x6,
+       DP_COLORIMETRY_BT2020_CYCC = 0x6,
+       DP_COLORIMETRY_BT2020_YCC = 0x7,
+};
+
+/**
+ * enum dp_dynamic_range - drm DP Dynamic Range
+ *
+ * This enum is used to indicate DP VSC SDP Dynamic Range.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ *
+ * @DP_DYNAMIC_RANGE_VESA: VESA range
+ * @DP_DYNAMIC_RANGE_CTA: CTA range
+ */
+enum dp_dynamic_range {
+       DP_DYNAMIC_RANGE_VESA = 0,
+       DP_DYNAMIC_RANGE_CTA = 1,
+};
+
+/**
+ * enum dp_content_type - drm DP Content Type
+ *
+ * This enum is used to indicate DP VSC SDP Content Types.
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
+ * DB18]
+ * CTA-861-G defines content types and expected processing by a sink device
+ *
+ * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type
+ * @DP_CONTENT_TYPE_GRAPHICS: Graphics type
+ * @DP_CONTENT_TYPE_PHOTO: Photo type
+ * @DP_CONTENT_TYPE_VIDEO: Video type
+ * @DP_CONTENT_TYPE_GAME: Game type
+ */
+enum dp_content_type {
+       DP_CONTENT_TYPE_NOT_DEFINED = 0x00,
+       DP_CONTENT_TYPE_GRAPHICS = 0x01,
+       DP_CONTENT_TYPE_PHOTO = 0x02,
+       DP_CONTENT_TYPE_VIDEO = 0x03,
+       DP_CONTENT_TYPE_GAME = 0x04,
+};
+
+/**
+ * struct drm_dp_vsc_sdp - drm DP VSC SDP
+ *
+ * This structure represents a DP VSC SDP of drm
+ * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and
+ * [Table 2-117: VSC SDP Payload for DB16 through DB18]
+ *
+ * @sdp_type: secondary-data packet type
+ * @revision: revision number
+ * @length: number of valid data bytes
+ * @pixelformat: pixel encoding format
+ * @colorimetry: colorimetry format
+ * @bpc: bit per color
+ * @dynamic_range: dynamic range information
+ * @content_type: CTA-861-G defines content types and expected processing by a sink device
+ */
+struct drm_dp_vsc_sdp {
+       unsigned char sdp_type;
+       unsigned char revision;
+       unsigned char length;
+       enum dp_pixelformat pixelformat;
+       enum dp_colorimetry colorimetry;
+       int bpc;
+       enum dp_dynamic_range dynamic_range;
+       enum dp_content_type content_type;
+};
+
+void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
+                       const struct drm_dp_vsc_sdp *vsc);
+
 int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
 
 static inline int
@@ -1548,6 +1690,13 @@ enum drm_dp_quirk {
         * capabilities advertised.
         */
        DP_QUIRK_FORCE_DPCD_BACKLIGHT,
+       /**
+        * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
+        *
+        * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
+        * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
+        */
+       DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
 };
 
 /**
@@ -1598,4 +1747,26 @@ static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
 
 #endif
 
+/**
+ * struct drm_dp_phy_test_params - DP Phy Compliance parameters
+ * @link_rate: Requested Link rate from DPCD 0x219
+ * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
+ * @phy_pattern: DP Phy test pattern from DPCD 0x248
+ * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
+ * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
+ * @enhanced_frame_cap: flag for enhanced frame capability.
+ */
+struct drm_dp_phy_test_params {
+       int link_rate;
+       u8 num_lanes;
+       u8 phy_pattern;
+       u8 hbr2_reset[2];
+       u8 custom80[10];
+       bool enhanced_frame_cap;
+};
+
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+                               struct drm_dp_phy_test_params *data);
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+                               struct drm_dp_phy_test_params *data, u8 dp_rev);
 #endif /* _DRM_DP_HELPER_H_ */
index 3cde42b333c36c0d88b74fdbc63df438c7f3e2b9..9e1ffcd7cb6837d191d3110fc1b1c4b279f0fd87 100644 (file)
@@ -157,19 +157,45 @@ struct drm_dp_mst_port {
         */
        bool has_audio;
 
+       /**
+        * @fec_capable: bool indicating if FEC can be supported up to that
+        * point in the MST topology.
+        */
        bool fec_capable;
 };
 
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+       u8 lct;
+       u8 lcr;
+       u8 rad[8];
+       bool broadcast;
+       bool path_msg;
+       u8 msg_len;
+       bool somt;
+       bool eomt;
+       bool seqno;
+};
+
+struct drm_dp_sideband_msg_rx {
+       u8 chunk[48];
+       u8 msg[256];
+       u8 curchunk_len;
+       u8 curchunk_idx; /* chunk we are parsing now */
+       u8 curchunk_hdrlen;
+       u8 curlen; /* total length of the msg */
+       bool have_somt;
+       bool have_eomt;
+       struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
 /**
  * struct drm_dp_mst_branch - MST branch device.
  * @rad: Relative Address to talk to this branch device.
  * @lct: Link count total to talk to this branch device.
  * @num_ports: number of ports on the branch.
- * @msg_slots: one bit per transmitted msg slot.
  * @port_parent: pointer to the port parent, NULL if toplevel.
  * @mgr: topology manager for this branch device.
- * @tx_slots: transmission slots for this device.
- * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
  * @guid: guid for DP 1.2 branch device. port under this branch can be
  * identified by port #.
@@ -210,7 +236,6 @@ struct drm_dp_mst_branch {
        u8 lct;
        int num_ports;
 
-       int msg_slots;
        /**
         * @ports: the list of ports on this branch device. This should be
         * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
@@ -223,13 +248,9 @@ struct drm_dp_mst_branch {
         */
        struct list_head ports;
 
-       /* list of tx ops queue for this port */
        struct drm_dp_mst_port *port_parent;
        struct drm_dp_mst_topology_mgr *mgr;
 
-       /* slots are protected by mstb->mgr->qlock */
-       struct drm_dp_sideband_msg_tx *tx_slots[2];
-       int last_seqno;
        bool link_address_sent;
 
        /* global unique identifier to identify branch devices */
@@ -237,19 +258,6 @@ struct drm_dp_mst_branch {
 };
 
 
-/* sideband msg header - not bit struct */
-struct drm_dp_sideband_msg_hdr {
-       u8 lct;
-       u8 lcr;
-       u8 rad[8];
-       bool broadcast;
-       bool path_msg;
-       u8 msg_len;
-       bool somt;
-       bool eomt;
-       bool seqno;
-};
-
 struct drm_dp_nak_reply {
        u8 guid[16];
        u8 reason;
@@ -306,18 +314,6 @@ struct drm_dp_remote_i2c_write_ack_reply {
 };
 
 
-struct drm_dp_sideband_msg_rx {
-       u8 chunk[48];
-       u8 msg[256];
-       u8 curchunk_len;
-       u8 curchunk_idx; /* chunk we are parsing now */
-       u8 curchunk_hdrlen;
-       u8 curlen; /* total length of the msg */
-       bool have_somt;
-       bool have_eomt;
-       struct drm_dp_sideband_msg_hdr initial_hdr;
-};
-
 #define DRM_DP_MAX_SDP_STREAMS 16
 struct drm_dp_allocate_payload {
        u8 port_number;
@@ -479,8 +475,6 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
-       void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
-                                 struct drm_connector *connector);
 };
 
 #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
@@ -555,15 +549,17 @@ struct drm_dp_mst_topology_mgr {
         */
        int conn_base_id;
 
-       /**
-        * @down_rep_recv: Message receiver state for down replies.
-        */
-       struct drm_dp_sideband_msg_rx down_rep_recv;
        /**
         * @up_req_recv: Message receiver state for up requests.
         */
        struct drm_dp_sideband_msg_rx up_req_recv;
 
+       /**
+        * @down_rep_recv: Message receiver state for replies to down
+        * requests.
+        */
+       struct drm_dp_sideband_msg_rx down_rep_recv;
+
        /**
         * @lock: protects @mst_state, @mst_primary, @dpcd, and
         * @payload_id_table_cleared.
@@ -589,11 +585,6 @@ struct drm_dp_mst_topology_mgr {
         */
        bool payload_id_table_cleared : 1;
 
-       /**
-        * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
-        */
-       bool is_waiting_for_dwn_reply : 1;
-
        /**
         * @mst_primary: Pointer to the primary/first branch device.
         */
@@ -618,13 +609,12 @@ struct drm_dp_mst_topology_mgr {
        const struct drm_private_state_funcs *funcs;
 
        /**
-        * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and
-        * &drm_dp_sideband_msg_tx.state once they are queued
+        * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
         */
        struct mutex qlock;
 
        /**
-        * @tx_msg_downq: List of pending down replies.
+        * @tx_msg_downq: List of pending down requests
         */
        struct list_head tx_msg_downq;
 
@@ -734,8 +724,6 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
                       struct drm_dp_mst_topology_mgr *mgr,
                       struct drm_dp_mst_port *port);
 
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
-                                       struct drm_dp_mst_port *port);
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
index 97109df5beac9a237d3864fb0e8e88693bbd682a..6d457652f199fa3f67225d87169ca59bb7525360 100644 (file)
@@ -262,9 +262,11 @@ struct drm_driver {
         * @release:
         *
         * Optional callback for destroying device data after the final
-        * reference is released, i.e. the device is being destroyed. Drivers
-        * using this callback are responsible for calling drm_dev_fini()
-        * to finalize the device and then freeing the struct themselves.
+        * reference is released, i.e. the device is being destroyed.
+        *
+        * This is deprecated, clean up all memory allocations associated with a
+        * &drm_device using drmm_add_action(), drmm_kmalloc() and related
+        * managed resources functions.
         */
        void (*release) (struct drm_device *);
 
@@ -323,7 +325,7 @@ struct drm_driver {
         *
         * Allows drivers to create driver-specific debugfs files.
         */
-       int (*debugfs_init)(struct drm_minor *minor);
+       void (*debugfs_init)(struct drm_minor *minor);
 
        /**
         * @gem_free_object: deconstructor for drm_gem_objects
@@ -620,7 +622,39 @@ int drm_dev_init(struct drm_device *dev,
 int devm_drm_dev_init(struct device *parent,
                      struct drm_device *dev,
                      struct drm_driver *driver);
-void drm_dev_fini(struct drm_device *dev);
+
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+                          size_t size, size_t offset);
+
+/**
+ * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
+ * @parent: Parent device object
+ * @driver: DRM driver
+ * @type: the type of the struct which contains struct &drm_device
+ * @member: the name of the &drm_device within @type.
+ *
+ * This allocates and initializes a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
+ *
+ * It is recommended that drivers embed &struct drm_device into their own device
+ * structure.
+ *
+ * Note that this manages the lifetime of the resulting &drm_device
+ * automatically using devres. The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+#define devm_drm_dev_alloc(parent, driver, type, member) \
+       ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
+                                      offsetof(type, member)))
 
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent);
index 4370e039c015a7501a073297c0e51e75761b278a..a60f5f1555acf9875015951ee371d6db7281690a 100644 (file)
@@ -142,7 +142,7 @@ struct drm_encoder {
         * the bits for all &drm_crtc objects this encoder can be connected to
         * before calling drm_dev_register().
         *
-        * In reality almost every driver gets this wrong.
+        * You will get a WARN if you get this wrong in the driver.
         *
         * Note that since CRTC objects can't be hotplugged the assigned indices
         * are stable and hence known before registering all objects.
@@ -159,7 +159,11 @@ struct drm_encoder {
         * encoders can be used in a cloned configuration, they both should have
         * each another bits set.
         *
-        * In reality almost every driver gets this wrong.
+        * As an exception to the above rule if the driver doesn't implement
+        * any cloning it can leave @possible_clones set to 0. The core will
+        * automagically fix this up by setting the bit for the encoder itself.
+        *
+        * You will get a WARN if you get this wrong in the driver.
         *
         * Note that since encoder objects can't be hotplugged the assigned indices
         * are stable and hence known before registering all objects.
index 208dbf87afa3ed7a7496798a45ffb365e6643be8..306aa3a60be96016e6b5d8744d77ec2498a45595 100644 (file)
@@ -269,7 +269,8 @@ int drm_fb_helper_debug_leave(struct fb_info *info);
 void drm_fb_helper_lastclose(struct drm_device *dev);
 void drm_fb_helper_output_poll_changed(struct drm_device *dev);
 
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
+void drm_fbdev_generic_setup(struct drm_device *dev,
+                            unsigned int preferred_bpp);
 #else
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
                                        struct drm_fb_helper *helper,
@@ -443,10 +444,9 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
 {
 }
 
-static inline int
+static inline void
 drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 {
-       return 0;
 }
 
 #endif
index 5aaf1c4593a970e24d688c4ddde966bea3a10b8d..716990bace1048139e0c316e07d97d30a71e53f2 100644 (file)
@@ -201,6 +201,17 @@ struct drm_file {
         */
        bool writeback_connectors;
 
+       /**
+        * @was_master:
+        *
+        * This client has, or had, master capability. Protected by struct
+        * &drm_device.master_mutex.
+        *
+        * This is used to ensure that CAP_SYS_ADMIN is not enforced, if the
+        * client is or was master in the past.
+        */
+       bool was_master;
+
        /**
         * @is_master:
         *
index c0e0256e3e988f87df08b761db8c3ca1b2799e26..be658ebbec72b084c47d5f7ab8289a942d6f6632 100644 (file)
@@ -297,4 +297,42 @@ int drm_framebuffer_plane_width(int width,
 int drm_framebuffer_plane_height(int height,
                                 const struct drm_framebuffer *fb, int plane);
 
+/**
+ * struct drm_afbc_framebuffer - a special afbc frame buffer object
+ *
+ * A derived class of struct drm_framebuffer, dedicated for afbc use cases.
+ */
+struct drm_afbc_framebuffer {
+       /**
+        * @base: base framebuffer structure.
+        */
+       struct drm_framebuffer base;
+       /**
+        * @block_width: width of a single afbc block
+        */
+       u32 block_width;
+       /**
+        * @block_height: height of a single afbc block
+        */
+       u32 block_height;
+       /**
+        * @aligned_width: aligned frame buffer width
+        */
+       u32 aligned_width;
+       /**
+        * @aligned_height: aligned frame buffer height
+        */
+       u32 aligned_height;
+       /**
+        * @offset: offset of the first afbc header
+        */
+       u32 offset;
+       /**
+        * @afbc_size: minimum size of afbc buffer
+        */
+       u32 afbc_size;
+};
+
+#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base)
+
 #endif
index d9f13fd25b0aef962808e95fc96a3e3b0f04f53d..6b013154911dcd1e6a26d831bd5fc0e2f28c0b33 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __DRM_GEM_FB_HELPER_H__
 #define __DRM_GEM_FB_HELPER_H__
 
+struct drm_afbc_framebuffer;
 struct drm_device;
 struct drm_fb_helper_surface_size;
 struct drm_file;
@@ -12,12 +13,19 @@ struct drm_plane;
 struct drm_plane_state;
 struct drm_simple_display_pipe;
 
+#define AFBC_VENDOR_AND_TYPE_MASK      GENMASK_ULL(63, 52)
+
 struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
                                          unsigned int plane);
 void drm_gem_fb_destroy(struct drm_framebuffer *fb);
 int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
                             unsigned int *handle);
 
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+                              struct drm_framebuffer *fb,
+                              struct drm_file *file,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              const struct drm_framebuffer_funcs *funcs);
 struct drm_framebuffer *
 drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
                             const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -29,6 +37,13 @@ struct drm_framebuffer *
 drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
                             const struct drm_mode_fb_cmd2 *mode_cmd);
 
+#define drm_is_afbc(modifier) \
+       (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
+
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+                        const struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct drm_afbc_framebuffer *afbc_fb);
+
 int drm_gem_fb_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *state);
 int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
index 0f6e47213d8d65611da6732a97e8d8f5786dce11..b63bcd1b996da4d19cf25f38a88034e8cc962d85 100644 (file)
@@ -196,7 +196,7 @@ static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
        return container_of(bdev, struct drm_vram_mm, bdev);
 }
 
-int drm_vram_mm_debugfs_init(struct drm_minor *minor);
+void drm_vram_mm_debugfs_init(struct drm_minor *minor);
 
 /*
  * Helpers for integration with struct drm_device
index aed382c17b2695a7d617d87fcbbe89351130bab2..852d7451eeb12943d9be4f77126d8a043df8f5fe 100644 (file)
@@ -194,11 +194,26 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
 
 #ifdef CONFIG_PCI
 
+struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
+                                    size_t align);
+void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
+
 int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
 
 #else
 
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+                                                  size_t size, size_t align)
+{
+       return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+                               struct drm_dma_handle *dmah)
+{
+}
+
 static inline int drm_legacy_pci_init(struct drm_driver *driver,
                                      struct pci_driver *pdriver)
 {
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
new file mode 100644 (file)
index 0000000..ca41146
--- /dev/null
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _DRM_MANAGED_H_
+#define _DRM_MANAGED_H_
+
+#include <linux/gfp.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+
+struct drm_device;
+
+typedef void (*drmres_release_t)(struct drm_device *dev, void *res);
+
+/**
+ * drmm_add_action - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function adds the @release action with optional parameter @data to the
+ * list of cleanup actions for @dev. The cleanup actions will be run in reverse
+ * order in the final drm_dev_put() call for @dev.
+ */
+#define drmm_add_action(dev, action, data) \
+       __drmm_add_action(dev, action, data, #action)
+
+int __must_check __drmm_add_action(struct drm_device *dev,
+                                  drmres_release_t action,
+                                  void *data, const char *name);
+
+/**
+ * drmm_add_action_or_reset - add a managed release action to a &drm_device
+ * @dev: DRM device
+ * @action: function which should be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * Similar to drmm_add_action(), with the only difference that upon failure
+ * @action is directly called for any cleanup work necessary on failures.
+ */
+#define drmm_add_action_or_reset(dev, action, data) \
+       __drmm_add_action_or_reset(dev, action, data, #action)
+
+int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
+                                           drmres_release_t action,
+                                           void *data, const char *name);
+
+void drmm_add_final_kfree(struct drm_device *dev, void *container);
+
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
+
+/**
+ * drmm_kzalloc - &drm_device managed kzalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kzalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+       return drmm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+
+/**
+ * drmm_kmalloc_array - &drm_device managed kmalloc_array()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc_array(). The allocated
+ * memory is automatically freed on the final drm_dev_put() and works exactly
+ * like a memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kmalloc_array(struct drm_device *dev,
+                                      size_t n, size_t size, gfp_t flags)
+{
+       size_t bytes;
+
+       if (unlikely(check_mul_overflow(n, size, &bytes)))
+               return NULL;
+
+       return drmm_kmalloc(dev, bytes, flags);
+}
+
+/**
+ * drmm_kcalloc - &drm_device managed kcalloc()
+ * @dev: DRM device
+ * @n: number of array elements to allocate
+ * @size: size of array member
+ * @flags: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kcalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+static inline void *drmm_kcalloc(struct drm_device *dev,
+                                size_t n, size_t size, gfp_t flags)
+{
+       return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
+
+void drmm_kfree(struct drm_device *dev, void *data);
+
+#endif
index 33f325f5af2b921f6fd9b6f693c78257ea6f3cac..4d0e49c0ed2cf2efa11a61a9026689b800236dea 100644 (file)
@@ -152,7 +152,6 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
 int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
                      const struct drm_simple_display_pipe_funcs *funcs,
                      const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_release(struct drm_device *drm);
 void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
                          struct drm_plane_state *old_state);
 void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
@@ -170,7 +169,8 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
 
 int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
 int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+                             size_t len);
 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
                      struct drm_rect *clip, bool swap);
 /**
@@ -187,12 +187,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
  */
 #define mipi_dbi_command(dbi, cmd, seq...) \
 ({ \
-       u8 d[] = { seq }; \
+       const u8 d[] = { seq }; \
        mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
 })
 
 #ifdef CONFIG_DEBUG_FS
-int mipi_dbi_debugfs_init(struct drm_minor *minor);
+void mipi_dbi_debugfs_init(struct drm_minor *minor);
 #else
 #define mipi_dbi_debugfs_init          NULL
 #endif
index ee8b0e80ca90b72d34e10dffc50b5562a52144ae..a01bc6fac83cef32f77ac849643b115f033929e8 100644 (file)
@@ -168,6 +168,7 @@ struct drm_mm_node {
        struct rb_node rb_hole_addr;
        u64 __subtree_last;
        u64 hole_size;
+       u64 subtree_max_hole;
        unsigned long flags;
 #define DRM_MM_NODE_ALLOCATED_BIT      0
 #define DRM_MM_NODE_SCANNED_BIT                1
index 3bcbe30339f044cd18d7bef994b8d98271ca9eca..6c3ef49b46b3ae7357dbfee50b73f4538bc62ed3 100644 (file)
@@ -929,7 +929,23 @@ struct drm_mode_config {
        const struct drm_mode_config_helper_funcs *helper_private;
 };
 
-void drm_mode_config_init(struct drm_device *dev);
+int __must_check drmm_mode_config_init(struct drm_device *dev);
+
+/**
+ * drm_mode_config_init - DRM mode_configuration structure initialization
+ * @dev: DRM device
+ *
+ * This is the unmanaged version of drmm_mode_config_init() for drivers which
+ * still explicitly call drm_mode_config_cleanup().
+ *
+ * FIXME: This function is deprecated and drivers should be converted over to
+ * drmm_mode_config_init().
+ */
+static inline int drm_mode_config_init(struct drm_device *dev)
+{
+       return drmm_mode_config_init(dev);
+}
+
 void drm_mode_config_reset(struct drm_device *dev);
 void drm_mode_config_cleanup(struct drm_device *dev);
 
index 320f8112a0f84e3bf3aebf248e374ce2be8d628d..303ee5fbbdd8347f4b3428eb0f77414958d389aa 100644 (file)
@@ -390,16 +390,6 @@ struct drm_display_mode {
         */
        int vrefresh;
 
-       /**
-        * @hsync:
-        *
-        * Horizontal refresh rate, for debug output in human readable form. Not
-        * used in a functional way.
-        *
-        * This value is in kHz.
-        */
-       int hsync;
-
        /**
         * @picture_aspect_ratio:
         *
@@ -493,7 +483,6 @@ int of_get_drm_display_mode(struct device_node *np,
                            int index);
 
 void drm_mode_set_name(struct drm_display_mode *mode);
-int drm_mode_hsync(const struct drm_display_mode *mode);
 int drm_mode_vrefresh(const struct drm_display_mode *mode);
 void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
                            int *hdisplay, int *vdisplay);
index 7c20b1c8b6a7a0b313cb4028f68a2372ece91bf5..421a30f084631749f092061f5c424bebe2c7f0ac 100644 (file)
@@ -1075,8 +1075,35 @@ struct drm_connector_helper_funcs {
        void (*atomic_commit)(struct drm_connector *connector,
                              struct drm_connector_state *state);
 
+       /**
+        * @prepare_writeback_job:
+        *
+        * As writeback jobs contain a framebuffer, drivers may need to
+        * prepare and clean them up the same way they can prepare and
+        * clean up framebuffers for planes. This optional connector operation
+        * is used to support the preparation of writeback jobs. The job
+        * prepare operation is called from drm_atomic_helper_prepare_planes()
+        * for struct &drm_writeback_connector connectors only.
+        *
+        * This operation is optional.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
        int (*prepare_writeback_job)(struct drm_writeback_connector *connector,
                                     struct drm_writeback_job *job);
+       /**
+        * @cleanup_writeback_job:
+        *
+        * This optional connector operation is used to support the
+        * cleanup of writeback jobs. The job cleanup operation is called
+        * from the existing drm_writeback_cleanup_job() function, invoked
+        * both when destroying the job as part of an aborted commit, or when
+        * the job completes.
+        *
+        * This operation is optional.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
        void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
                                      struct drm_writeback_job *job);
 };
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
deleted file mode 100644 (file)
index 3941b02..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_PCI_H_
-#define _DRM_PCI_H_
-
-#include <linux/pci.h>
-
-struct drm_dma_handle;
-struct drm_device;
-struct drm_driver;
-struct drm_master;
-
-#ifdef CONFIG_PCI
-
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
-                                    size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
-                                                  size_t size, size_t align)
-{
-       return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
-                               struct drm_dma_handle *dmah)
-{
-}
-
-#endif
-
-#endif /* _DRM_PCI_H_ */
index ca7cee8e728a5e7c94c8809c7b0bafc4ecb7959c..1c9417430d08520f8c5c9a593ee874f7d5aebdfc 100644 (file)
@@ -313,6 +313,10 @@ enum drm_debug_category {
         * @DRM_UT_DP: Used in the DP code.
         */
        DRM_UT_DP               = 0x100,
+       /**
+        * @DRM_UT_DRMRES: Used in the drm managed resources code.
+        */
+       DRM_UT_DRMRES           = 0x200,
 };
 
 static inline bool drm_debug_enabled(enum drm_debug_category category)
@@ -442,6 +446,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
        drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
 #define drm_dbg_dp(drm, fmt, ...)                                      \
        drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+#define drm_dbg_drmres(drm, fmt, ...)                                  \
+       drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
 
 
 /*
index 777c14c847f03f85f5a0aa57864ef7037c5a26c3..9697d2714d2abf420eb22525afbcdbd991b73daf 100644 (file)
 #include <drm/drm_encoder.h>
 #include <linux/workqueue.h>
 
+/**
+ * struct drm_writeback_connector - DRM writeback connector
+ */
 struct drm_writeback_connector {
+       /**
+        * @base: base drm_connector object
+        */
        struct drm_connector base;
 
        /**
@@ -78,6 +84,9 @@ struct drm_writeback_connector {
        char timeline_name[32];
 };
 
+/**
+ * struct drm_writeback_job - DRM writeback job
+ */
 struct drm_writeback_job {
        /**
         * @connector:
index 26b04ff62676612aee4a507255710db4177853ba..a21b3b92135a6ee4898a20fed3464967c463172f 100644 (file)
@@ -56,6 +56,7 @@ enum drm_sched_priority {
  *              Jobs from this entity can be scheduled on any scheduler
  *              on this list.
  * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
+ * @priority: priority of the entity
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
index 1d2c12219f44558db58dbb3d7713d389b319414e..662d8351c87a99e79e76716b83ff9c8e820242f3 100644 (file)
 
 /* TGL */
 #define INTEL_TGL_12_IDS(info) \
-       INTEL_VGA_DEVICE(0x9A49, info), \
        INTEL_VGA_DEVICE(0x9A40, info), \
+       INTEL_VGA_DEVICE(0x9A49, info), \
        INTEL_VGA_DEVICE(0x9A59, info), \
        INTEL_VGA_DEVICE(0x9A60, info), \
        INTEL_VGA_DEVICE(0x9A68, info), \
        INTEL_VGA_DEVICE(0x9A70, info), \
-       INTEL_VGA_DEVICE(0x9A78, info)
+       INTEL_VGA_DEVICE(0x9A78, info), \
+       INTEL_VGA_DEVICE(0x9AC0, info), \
+       INTEL_VGA_DEVICE(0x9AC9, info), \
+       INTEL_VGA_DEVICE(0x9AD9, info), \
+       INTEL_VGA_DEVICE(0x9AF8, info)
 
 #endif /* _I915_PCIIDS_H */
index c9e0fd09f4b25cbca01c8b7fbca457ff8db857e0..54a527aa79ccddae36b731b0f015aa203ad81b39 100644 (file)
@@ -390,7 +390,6 @@ struct ttm_bo_driver {
 /**
  * struct ttm_bo_global - Buffer object driver global data.
  *
- * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
  * @shrink: A shrink callback object used for buffer object swap.
diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h
deleted file mode 100644 (file)
index b5e460f..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2017 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Tom St Denis <tom.stdenis@amd.com>
- */
-extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt);
-extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt);
index 57bcef6f988a2d237f8e986bc9cefd01c639340d..ab0c156abee6e95e1f37b526467e2efd012c5b5a 100644 (file)
@@ -333,6 +333,14 @@ struct dma_buf {
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
+       /**
+        * @allow_peer2peer:
+        *
+        * If this is set to true the importer must be able to handle peer
+        * resources without struct pages.
+        */
+       bool allow_peer2peer;
+
        /**
         * @move_notify: [optional] notification that the DMA-buf is moving
         *
@@ -361,6 +369,7 @@ struct dma_buf_attach_ops {
  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
  * @sgt: cached mapping.
  * @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
  * @importer_ops: importer operations for this attachment, if provided
  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
@@ -381,6 +390,7 @@ struct dma_buf_attachment {
        struct list_head node;
        struct sg_table *sgt;
        enum dma_data_direction dir;
+       bool peer2peer;
        const struct dma_buf_attach_ops *importer_ops;
        void *importer_priv;
        void *priv;
index 9613d796cfb1772a97f225e79936fe21a68e224b..50c31f1a0a2d097a57b9ceac6adb3d54cbd53747 100644 (file)
@@ -219,6 +219,8 @@ ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
 ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
                                     void *buffer, size_t size);
 int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+                                  const void *buffer, size_t size);
 
 enum hdmi_spd_sdi {
        HDMI_SPD_SDI_UNKNOWN,
index d28b4ce744d59579a8ef5e9e004a9210c3bfdab6..4e873dcbe68f1338021c7e115a88b6fbb8c3f590 100644 (file)
@@ -133,6 +133,11 @@ extern "C" {
  * releasing the memory
  */
 #define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
+/* Flag that BO will be encrypted and that the TMZ bit should be
+ * set in the PTEs when mapping this buffer via GPUVM or
+ * accessing it with various hw blocks
+ */
+#define AMDGPU_GEM_CREATE_ENCRYPTED            (1 << 10)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
@@ -559,7 +564,7 @@ struct drm_amdgpu_cs_in {
        /**  Handle of resource list associated with CS */
        __u32           bo_list_handle;
        __u32           num_chunks;
-       __u32           _pad;
+       __u32           flags;
        /** this points to __u64 * which point to cs chunks */
        __u64           chunks;
 };
@@ -593,6 +598,14 @@ union drm_amdgpu_cs {
  */
 #define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
 
+/* Flag the IB as secure (TMZ)
+ */
+#define AMDGPU_IB_FLAGS_SECURE  (1 << 5)
+
+/* Tell KMD to flush and invalidate caches
+ */
+#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC  (1 << 6)
+
 struct drm_amdgpu_cs_chunk_ib {
        __u32 _pad;
        /** AMDGPU_IB_FLAG_* */
index 8bc0b31597d80737a804e96fb81b46b5308afc27..490143500a50806ce40bd77bc60b4ba6c66dc0ff 100644 (file)
@@ -354,9 +354,12 @@ extern "C" {
  * a platform-dependent stride. On top of that the memory can apply
  * platform-depending swizzling of some higher address bits into bit6.
  *
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms it is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
  */
 #define I915_FORMAT_MOD_X_TILED        fourcc_mod_code(INTEL, 1)
 
@@ -369,9 +372,12 @@ extern "C" {
  * memory can apply platform-depending swizzling of some higher address bits
  * into bit6.
  *
- * This format is highly platforms specific and not useful for cross-driver
- * sharing. It exists since on a given platform it does uniquely identify the
- * layout in a simple way for i915-specific userspace.
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms it is highly platform specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
  */
 #define I915_FORMAT_MOD_Y_TILED        fourcc_mod_code(INTEL, 2)
 
@@ -521,7 +527,113 @@ extern "C" {
 #define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
 
 /*
- * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
+ * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
+ * and Tegra GPUs starting with Tegra K1.
+ *
+ * Pixels are arranged in Groups of Bytes (GOBs).  GOB size and layout varies
+ * based on the architecture generation.  GOBs themselves are then arranged in
+ * 3D blocks, with the block dimensions (in terms of GOBs) always being a power
+ * of two, and hence expressible as their log2 equivalent (E.g., "2" represents
+ * a block depth or height of "4").
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ *
+ *       Macro
+ * Bits  Param Description
+ * ----  ----- -----------------------------------------------------------------
+ *
+ *  3:0  h     log2(height) of each block, in GOBs.  Placed here for
+ *             compatibility with the existing
+ *             DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ *  4:4  -     Must be 1, to indicate block-linear layout.  Necessary for
+ *             compatibility with the existing
+ *             DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ *  8:5  -     Reserved (To support 3D-surfaces with variable log2(depth) block
+ *             size).  Must be zero.
+ *
+ *             Note there is no log2(width) parameter.  Some portions of the
+ *             hardware support a block width of two gobs, but it is impractical
+ *             to use due to lack of support elsewhere, and has no known
+ *             benefits.
+ *
+ * 11:9  -     Reserved (To support 2D-array textures with variable array stride
+ *             in blocks, specified via log2(tile width in blocks)).  Must be
+ *             zero.
+ *
+ * 19:12 k     Page Kind.  This value directly maps to a field in the page
+ *             tables of all GPUs >= NV50.  It affects the exact layout of bits
+ *             in memory and can be derived from the tuple
+ *
+ *               (format, GPU model, compression type, samples per pixel)
+ *
+ *             Where compression type is defined below.  If GPU model were
+ *             implied by the format modifier, format, or memory buffer, page
+ *             kind would not need to be included in the modifier itself, but
+ *             since the modifier should define the layout of the associated
+ *             memory buffer independent from any device or other context, it
+ *             must be included here.
+ *
+ * 21:20 g     GOB Height and Page Kind Generation.  The height of a GOB changed
+ *             starting with Fermi GPUs.  Additionally, the mapping between page
+ *             kind and bit layout has changed at various points.
+ *
+ *               0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
+ *               1 = Gob Height 4, G80 - GT2XX Page Kind mapping
+ *               2 = Gob Height 8, Turing+ Page Kind mapping
+ *               3 = Reserved for future use.
+ *
+ * 22:22 s     Sector layout.  On Tegra GPUs prior to Xavier, there is a further
+ *             bit remapping step that occurs at an even lower level than the
+ *             page kind and block linear swizzles.  This causes the layout of
+ *             surfaces mapped in those SOC's GPUs to be incompatible with the
+ *             equivalent mapping on other GPUs in the same system.
+ *
+ *               0 = Tegra K1 - Tegra Parker/TX2 Layout.
+ *               1 = Desktop GPU and Tegra Xavier+ Layout
+ *
+ * 25:23 c     Lossless Framebuffer Compression type.
+ *
+ *               0 = none
+ *               1 = ROP/3D, layout 1, exact compression format implied by Page
+ *                   Kind field
+ *               2 = ROP/3D, layout 2, exact compression format implied by Page
+ *                   Kind field
+ *               3 = CDE horizontal
+ *               4 = CDE vertical
+ *               5 = Reserved for future use
+ *               6 = Reserved for future use
+ *               7 = Reserved for future use
+ *
+ * 55:26 -     Reserved for future use.  Must be zero.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
+       fourcc_mod_code(NVIDIA, (0x10 | \
+                                ((h) & 0xf) | \
+                                (((k) & 0xff) << 12) | \
+                                (((g) & 0x3) << 20) | \
+                                (((s) & 0x1) << 22) | \
+                                (((c) & 0x7) << 23)))
+
+/* To grandfather in prior block linear format modifiers to the above layout,
+ * the page kind "0", which corresponds to "pitch/linear" and hence is unusable
+ * with block-linear layouts, is remapped within drivers to the value 0xfe,
+ * which corresponds to the "generic" kind used for simple single-sample
+ * uncompressed color formats on Fermi - Volta GPUs.
+ */
+static inline __u64
+drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+{
+       if (!(modifier & 0x10) || (modifier & (0xff << 12)))
+               return modifier;
+       else
+               return modifier | (0xfe << 12);
+}
+
+/*
+ * 16Bx2 Block Linear layout, used by Tegra K1 and later
  *
  * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
  * vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -542,20 +654,20 @@ extern "C" {
  * in full detail.
  */
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
-       fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
+       DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
 
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
-       fourcc_mod_code(NVIDIA, 0x10)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
-       fourcc_mod_code(NVIDIA, 0x11)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
-       fourcc_mod_code(NVIDIA, 0x12)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
-       fourcc_mod_code(NVIDIA, 0x13)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
-       fourcc_mod_code(NVIDIA, 0x14)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
-       fourcc_mod_code(NVIDIA, 0x15)
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
 
 /*
  * Some Broadcom modifiers take parameters, for example the number of
index 2813e579b480cc5e6c10e85663b64ac7dafe9102..14b67cd6b54b76a2cc7ef93aab3d2e37e8f8770f 100644 (file)
@@ -1969,6 +1969,30 @@ enum drm_i915_perf_property_id {
         */
        DRM_I915_PERF_PROP_HOLD_PREEMPTION,
 
+       /**
+        * Specifying this pins all contexts to the specified SSEU power
+        * configuration for the duration of the recording.
+        *
+        * This parameter's value is a pointer to a struct
+        * drm_i915_gem_context_param_sseu.
+        *
+        * This property is available in perf revision 4.
+        */
+       DRM_I915_PERF_PROP_GLOBAL_SSEU,
+
+       /**
+        * This optional parameter specifies the timer interval in nanoseconds
+        * at which the i915 driver will check the OA buffer for available data.
+        * Minimum allowed value is 100 microseconds. A default value is used by
+        * the driver if this parameter is not specified. Note that larger timer
+        * values will reduce cpu consumption during OA perf captures. However,
+        * excessively large values would potentially result in OA buffer
+        * overwrites as captures reach end of the OA buffer.
+        *
+        * This property is available in perf revision 5.
+        */
+       DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+
        DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
index 20917c59f39c9a2a27bf1b62e907d5e2a31f88a6..b6be62356d343f64b4576b55aa8a737dfd173397 100644 (file)
@@ -251,7 +251,7 @@ struct kfd_memory_exception_failure {
        __u32 imprecise;        /* Can't determine the  exact fault address */
 };
 
-/* memory exception data*/
+/* memory exception data */
 struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
@@ -410,6 +410,20 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u32 n_success;                /* to/from KFD */
 };
 
+/* Allocate GWS for specific queue
+ *
+ * @queue_id:    queue's id that GWS is allocated for
+ * @num_gws:     how many GWS to allocate
+ * @first_gws:   index of the first GWS allocated.
+ *               only support contiguous GWS allocation
+ */
+struct kfd_ioctl_alloc_queue_gws_args {
+       __u32 queue_id;         /* to KFD */
+       __u32 num_gws;          /* to KFD */
+       __u32 first_gws;        /* from KFD */
+       __u32 pad;
+};
+
 struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
@@ -529,7 +543,10 @@ enum kfd_mmio_remap {
 #define AMDKFD_IOC_IMPORT_DMABUF               \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
 
+#define AMDKFD_IOC_ALLOC_QUEUE_GWS             \
+               AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
+
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x1E
+#define AMDKFD_COMMAND_END             0x1F
 
 #endif
index fa53e9f738935605acf538cdcbbf76f41f410169..ac2aecfbc7a828d4494272245809984bab340c3c 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -524,6 +524,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 {
        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
 }
+EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
@@ -531,6 +532,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 {
        return __do_kmalloc_node(size, gfp, node, caller);
 }
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
 void kfree(const void *block)
index 2c56cc9e4ff2407bd94100c43c0b14c262dba54a..336be3224092e1226256f7a9e48bbd715911e131 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4445,6 +4445,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
        return ret;
 }
+EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -4475,6 +4476,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
        return ret;
 }
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
 #ifdef CONFIG_SYSFS